Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 09:11:26 +03:00)
pva: mirror from gitlab cv/pva-sys-sw
Gitlab commit 0d38bbb765943 ("Revert "deploy: Update NvSCI p...")
Changes since last deployment:
0d38bbb7: Revert "deploy: Update NvSCI path in tegra tree"
2201a3ec: Revert "deploy: update nvidia-oot path"
Forked changes (dropped):
54b5ea83: Revert "deploy: Update NvSCI path in tegra tree"
1d42b346: Revert "deploy: update nvidia-oot path"
5819cf8b: Fix Misra defect of type 5.1
d9561abf: Fw: Fix Misra defects
ec8941c0: coverity: Qnx safety upload job timout increase
66fb1410: Fix pva_waiter_thread_func in umd tests.
0f06989c: Misra defect fixes in QNX KMD
76532814: Static defect fixes
1cbd5a36: Fix Misra defects
0deee89b: Fix misra defects
60335a28: Fix Misra defects
dbd5301d: Port pva_intf_test to pva_umd_tests
Change-Id: Ia830a1095993ca863ed649b44bd3d3930894389c
Signed-off-by: nanwa <nanwa@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3508954
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Shaurya Garg <shauryag@nvidia.com>
@@ -82,8 +82,8 @@ struct pva_dma_transfer_attr {
 /** When dynamic slot flag is set, it means the memory location will be
  * relocated by commands.
  */
-#define PVA_DMA_DYNAMIC_SLOT ((uint16_t)1U << 15)
-#define PVA_DMA_STATIC_SLOT ((uint16_t)1U << 14)
+#define PVA_DMA_DYNAMIC_SLOT (1U << 15)
+#define PVA_DMA_STATIC_SLOT (1U << 14)
 #define PVA_DMA_SLOT_INVALID 0
 #define PVA_DMA_SLOT_ID_MASK 0xFFU
 #define PVA_DMA_MAX_NUM_SLOTS 256

@@ -6,13 +6,13 @@
 #include "pva_kmd_regs.h"
 #include "pva_kmd_silicon_utils.h"
 
-void pva_kmd_abort_fw(struct pva_kmd_device *pva, enum pva_error error_code)
+void pva_kmd_abort_fw(struct pva_kmd_device *pva, uint32_t error_code)
 {
 	// HW watchdog may fire repeatedly if PVA is hung. Therefore, disable all
 	// interrupts to protect KMD from potential interrupt floods.
 	pva_kmd_disable_all_interrupts_nosync(pva);
 
-	pva_kmd_report_error_fsi(pva, (uint32_t)error_code);
+	pva_kmd_report_error_fsi(pva, error_code);
 	// We will handle firmware reboot after all contexts are closed and a new
 	// one is re-opened again
 	pva->recovery = true;

@@ -27,6 +27,6 @@
  * @param[in] error_code Error code indicating the reason for abort
  * Valid range: [0 .. UINT32_MAX]
  */
-void pva_kmd_abort_fw(struct pva_kmd_device *pva, enum pva_error error_code);
+void pva_kmd_abort_fw(struct pva_kmd_device *pva, uint32_t error_code);
 
 #endif //PVA_KMD_ABORT_H

@@ -545,7 +545,7 @@ static inline void pva_kmd_set_cmd_update_resource_table(
 	cmd->header.opcode = PVA_CMD_OPCODE_UPDATE_RESOURCE_TABLE;
 	cmd->header.len = (uint8_t)(sizeof(*cmd) / sizeof(uint32_t));
 	/* resource_table_id field is uint8_t - bounded by CCQ ID (max 7) */
-	ASSERT(resource_table_id <= (uint32_t)U8_MAX);
+	ASSERT(resource_table_id <= U8_MAX);
 	cmd->resource_table_id = (uint8_t)resource_table_id;
 	cmd->resource_id = resource_id;
 	cmd->entry = *entry;

@@ -623,16 +623,14 @@ static inline void pva_kmd_set_cmd_resume_fw(struct pva_cmd_resume_fw *cmd)
 
 static inline void pva_kmd_set_cmd_init_shared_dram_buffer(
 	struct pva_cmd_init_shared_dram_buffer *cmd, uint8_t interface,
-	uint64_t buffer_iova, uint64_t buffer_size)
+	uint32_t buffer_iova, uint32_t buffer_size)
 {
 	(void)memset(cmd, 0, sizeof(*cmd));
 	cmd->header.opcode = PVA_CMD_OPCODE_INIT_SHARED_DRAM_BUFFER;
 	cmd->header.len = (uint8_t)(sizeof(*cmd) / sizeof(uint32_t));
 	cmd->buffer_iova_hi = iova_hi(buffer_iova);
 	cmd->buffer_iova_lo = iova_lo(buffer_iova);
-	/* CERT INT31-C: Hardware constrains buffer sizes to 32-bit address space */
-	ASSERT(buffer_size <= U32_MAX);
-	cmd->buffer_size = (uint32_t)buffer_size;
+	cmd->buffer_size = buffer_size;
 	cmd->interface = interface;
 }
 

@@ -70,10 +70,7 @@ free_queue_mem:
 	pva_kmd_free(ctx->queue_allocator_mem);
 free_ctx:
 	pva_kmd_mutex_deinit(&ctx->ocb_lock);
-	err = pva_kmd_free_block(&pva->context_allocator, alloc_id);
-	if (err != PVA_SUCCESS) {
-		pva_kmd_log_err("Failed to free context block");
-	}
+	(void)pva_kmd_free_block(&pva->context_allocator, alloc_id);
 err_out:
 	pva_kmd_log_err("Failed to create PVA context");
 	return NULL;

@@ -351,10 +348,7 @@ enum pva_error pva_kmd_context_init(struct pva_kmd_context *ctx,
 	return PVA_SUCCESS;
 
 deinit_fw_context:
-	if (PVA_SUCCESS != notify_fw_context_deinit(ctx)) {
-		pva_kmd_log_err(
-			"Failed to deinitialize FW context during cleanup");
-	}
+	(void)notify_fw_context_deinit(ctx);
 deinit_submitter:
 	pva_kmd_mutex_deinit(&ctx->chunk_pool_lock);
 deinit_submit_lock:

@@ -50,11 +50,13 @@ validate_channel_mapping(struct pva_dma_config const *out_cfg,
 	enum pva_error err = PVA_SUCCESS;
 
 	for (uint8_t i = 0U; i < cfg_hdr->num_channels; i++) {
+		uint8_t desc_id;
+
 		channel = &out_cfg->channels[i];
+		desc_id = safe_addu8(channel->desc_index,
+				     out_cfg->header.base_descriptor);
 		if ((channel->desc_index >= out_cfg->header.num_descriptors) ||
-		    (pva_is_reserved_desc(addu8(channel->desc_index,
-						cfg_hdr->base_descriptor,
-						&math_err)))) {
+		    (pva_is_reserved_desc(desc_id))) {
 			pva_kmd_log_err(
 				"ERR: Invalid Channel Descriptor Index");
 			return PVA_INVAL;

@@ -309,8 +311,8 @@ validate_descriptor(const struct pva_dma_descriptor *desc,
 	if ((desc->link_desc_id - cfg_hdr->base_descriptor >
 	     cfg_hdr->num_descriptors) ||
 	    ((desc->link_desc_id != 0U) &&
-	     pva_is_reserved_desc(desc->link_desc_id -
-				  (uint8_t)PVA_DMA_DESC_ID_BASE))) {
+	     pva_is_reserved_desc((uint32_t)desc->link_desc_id -
+				  (uint32_t)PVA_DMA_DESC_ID_BASE))) {
 		pva_kmd_log_err("ERR: Invalid linker Desc ID");
 		return PVA_INVAL;
 	}

@@ -403,8 +405,7 @@ is_dma_config_header_valid(struct pva_ops_dma_config_register const *ops_hdr,
 	offsets[0].end = addu32(
 		ops_hdr->channels_offset,
 		align8_u32(mulu32(cfg_hdr->num_channels,
-				  (uint32_t)sizeof(struct pva_dma_channel),
-				  &math_err),
+				  sizeof(struct pva_dma_channel), &math_err),
 			   &math_err),
 		&math_err);
 

@@ -412,25 +413,22 @@ is_dma_config_header_valid(struct pva_ops_dma_config_register const *ops_hdr,
 	offsets[1].end = addu32(
 		ops_hdr->descriptors_offset,
 		align8_u32(mulu32(cfg_hdr->num_descriptors,
-				  (uint32_t)sizeof(struct pva_dma_descriptor),
-				  &math_err),
+				  sizeof(struct pva_dma_descriptor), &math_err),
 			   &math_err),
 		&math_err);
 
 	offsets[2].start = ops_hdr->hwseq_words_offset;
-	offsets[2].end =
-		addu32(ops_hdr->hwseq_words_offset,
-		       align8_u32(mulu32(cfg_hdr->num_hwseq_words,
-					 (uint32_t)sizeof(uint32_t), &math_err),
-				  &math_err),
-		       &math_err);
+	offsets[2].end = addu32(ops_hdr->hwseq_words_offset,
+				align8_u32(mulu32(cfg_hdr->num_hwseq_words,
+						  sizeof(uint32_t), &math_err),
+					   &math_err),
+				&math_err);
 
 	offsets[3].start = ops_hdr->static_bindings_offset;
 	offsets[3].end =
 		addu32(ops_hdr->static_bindings_offset,
 		       align8_u32(mulu32(cfg_hdr->num_static_slots,
-					 (uint32_t)sizeof(
-						 struct pva_dma_static_binding),
+					 sizeof(struct pva_dma_static_binding),
 					 &math_err),
 				  &math_err),
 		       &math_err);

@@ -488,13 +486,15 @@ validate_descriptors(const struct pva_dma_config *dma_config,
 	const struct pva_dma_config_header *cfg_hdr = &dma_config->header;
 	const struct pva_dma_descriptor *desc;
 	bool relax_dim3_check = true;
-	uint8_t desc_id;
-	pva_math_error math_err = MATH_OP_SUCCESS;
 
 	for (i = 0U; i < cfg_hdr->num_descriptors; i++) {
-		desc_id = addu8(i, cfg_hdr->base_descriptor, &math_err);
+		uint8_t desc_id =
+			safe_addu8((uint8_t)i, cfg_hdr->base_descriptor);
+
 		if (pva_is_reserved_desc(desc_id)) {
 			// skip over the reserved descriptor range
+			i = safe_subu8(PVA_RESERVED_DESCRIPTORS_END,
+				       dma_config->header.base_descriptor);
 			continue;
 		}
 

@@ -509,11 +509,6 @@ validate_descriptors(const struct pva_dma_config *dma_config,
 		}
 	}
 
-	if (math_err != MATH_OP_SUCCESS) {
-		pva_kmd_log_err("validate_descriptors math error");
-		return PVA_ERR_MATH_OP;
-	}
-
 	return err;
 }
 

@@ -735,7 +730,7 @@ static void update_reloc_count(uint16_t slot, uint8_t transfer_mode,
 			       struct pva_fw_dma_slot *out_dyn_slots,
 			       uint16_t num_dyn_slots, bool is_dst)
 {
-	uint16_t slot_id = get_slot_id(slot);
+	uint8_t slot_id = get_slot_id(slot);
 
 	if ((slot & PVA_DMA_DYNAMIC_SLOT) != 0U) {
 		out_dyn_slots[slot_id].reloc_count =

@@ -756,41 +751,32 @@ static void count_relocs(struct pva_dma_config const *dma_cfg,
 			 struct pva_fw_dma_slot *out_dyn_slots,
 			 uint16_t num_dyn_slots)
 {
-	uint8_t i = 0U;
+	uint8_t i;
 	const struct pva_dma_descriptor *desc;
 
-	/* CERT INT30-C: Use safe arithmetic to prevent potential wrap */
-	while (i < dma_cfg->header.num_descriptors) {
-		uint8_t desc_id =
-			safe_addu8(i, dma_cfg->header.base_descriptor);
-		if (pva_is_reserved_desc(desc_id)) {
+	for (i = 0U; i < dma_cfg->header.num_descriptors; i++) {
+		if (pva_is_reserved_desc(i + dma_cfg->header.base_descriptor)) {
 			// skip over the reserved descriptor range
-			/* CERT INT31-C: Use safe subtract to handle underflow gracefully */
-			i = safe_subu8(PVA_RESERVED_DESCRIPTORS_END,
-				       dma_cfg->header.base_descriptor);
-			/* Skip to first descriptor after reserved range */
-			i = safe_addu8(i, 1U);
+			i = PVA_RESERVED_DESCRIPTORS_END -
+			    dma_cfg->header.base_descriptor;
 			continue;
 		}
 		desc = &dma_cfg->descriptors[i];
 
 		update_reloc_count(desc->src.slot, desc->src.transfer_mode,
-				   (bool)desc->src.cb_enable, out_static_slots,
+				   desc->src.cb_enable, out_static_slots,
 				   num_static_slots, out_dyn_slots,
 				   num_dyn_slots, false);
 
 		update_reloc_count(desc->dst.slot, desc->dst.transfer_mode,
-				   (bool)desc->dst.cb_enable, out_static_slots,
+				   desc->dst.cb_enable, out_static_slots,
 				   num_static_slots, out_dyn_slots,
 				   num_dyn_slots, true);
 
 		update_reloc_count(desc->dst2_slot, desc->dst.transfer_mode,
-				   (bool)desc->dst.cb_enable, out_static_slots,
+				   desc->dst.cb_enable, out_static_slots,
 				   num_static_slots, out_dyn_slots,
 				   num_dyn_slots, true);
-
-		/* CERT INT30-C: Use safe addition to prevent wrap */
-		i = safe_addu8(i, 1U);
 	}
 }
 

@@ -806,7 +792,7 @@ static void write_one_reloc(uint8_t ch_index, uint32_t desc_index,
 	int64_t old_start_addr = info->slots[slot_id].start_addr;
 	int64_t old_end_addr = info->slots[slot_id].end_addr;
 	uint32_t shift_amount = (uint32_t)ch_index & 0x0FU;
-	uint32_t shift_result = (uint32_t)1U << shift_amount;
+	uint32_t shift_result = 1U << shift_amount;
 	uint16_t ch_mask_u16;
 	uint16_t new_mask;
 

@@ -815,13 +801,13 @@ static void write_one_reloc(uint8_t ch_index, uint32_t desc_index,
 	info->slots[slot_id].end_addr =
 		maxs64(access_entry->end_addr, old_end_addr);
 
-	ASSERT(shift_result <= (uint32_t)U16_MAX);
+	ASSERT(shift_result <= U16_MAX);
 	ch_mask_u16 = (uint16_t)shift_result;
 	new_mask = info->slots[slot_id].ch_use_mask | ch_mask_u16;
 	info->slots[slot_id].ch_use_mask = new_mask;
 
 	/* desc_index field is uint8_t - validated by DMA config validation */
-	ASSERT(desc_index <= (uint32_t)U8_MAX);
+	ASSERT(desc_index <= U8_MAX);
 	info->relocs[reloc_id].desc_index = (uint8_t)desc_index;
 	info->relocs[reloc_id].field = reloc_field;
 	info->reloc_off[slot_id] = safe_addu8(info->reloc_off[slot_id], 1U);

@@ -868,6 +854,8 @@ static void write_relocs(const struct pva_dma_config *dma_cfg,
 		if (pva_is_reserved_desc(
 			    safe_addu8(i, dma_cfg->header.base_descriptor))) {
 			// skip over the reserved descriptor range
+			i = safe_subu8(PVA_RESERVED_DESCRIPTORS_END,
+				       dma_cfg->header.base_descriptor);
 			continue;
 		}
 		desc = &dma_cfg->descriptors[i];

@@ -975,20 +963,17 @@ static enum pva_error get_access_size(const struct pva_dma_descriptor *desc,
 	end += adds64((int64_t)dim_offset_U, (int64_t)tx, &math_err);
 
 	// 3rd dim
-	dim_offset =
-		muls32((int32_t)(attr->adv1), (int32_t)(attr->rpt1), &math_err);
+	dim_offset = muls32((attr->adv1), (int32_t)(attr->rpt1), &math_err);
 	start += mins32(dim_offset, 0);
 	end += maxs32(dim_offset, 0);
 
 	// 4th dim
-	dim_offset =
-		muls32((int32_t)(attr->adv2), (int32_t)(attr->rpt2), &math_err);
+	dim_offset = muls32((attr->adv2), (int32_t)(attr->rpt2), &math_err);
 	start += mins32(dim_offset, 0);
 	end += maxs32(dim_offset, 0);
 
 	// 5th dim
-	dim_offset =
-		muls32((int32_t)(attr->adv3), (int32_t)(attr->rpt3), &math_err);
+	dim_offset = muls32((attr->adv3), (int32_t)(attr->rpt3), &math_err);
 	start += mins32(dim_offset, 0);
 	end += maxs32(dim_offset, 0);
 	// convert to byte range

@@ -1096,8 +1081,8 @@ void pva_kmd_collect_relocs(struct pva_dma_config const *dma_cfg,
 	count_relocs(dma_cfg, out_static_slots, num_static_slots, out_dyn_slots,
 		     num_dyn_slots);
 
-	(void)memset(static_reloc_off, 0, sizeof(static_reloc_off));
-	(void)memset(dyn_reloc_off, 0, sizeof(dyn_reloc_off));
+	(void)memset(static_reloc_off, 0U, sizeof(static_reloc_off));
+	(void)memset(dyn_reloc_off, 0U, sizeof(dyn_reloc_off));
 
 	rel_info.dyn_slot.slots = out_dyn_slots;
 	rel_info.dyn_slot.relocs = out_dyn_relocs;

@@ -16,12 +16,8 @@ static void write_dma_channel(struct pva_dma_channel const *ch,
 			      bool support_hwseq_frame_linking)
 {
 	/* DMA_CHANNEL_CNTL0_CHSDID: DMA_CHANNEL_CNTL0[0] = descIndex + 1;*/
-	uint8_t desc_sum_u8 =
-		safe_addu8(safe_addu8(ch->desc_index, base_desc_index), 1U);
-	uint8_t cntl1_val;
-	uint16_t hwseqcntl_val;
-
-	fw_ch->cntl0 = (uint32_t)desc_sum_u8;
+	uint32_t desc_sum = ch->desc_index + base_desc_index + 1U;
+	fw_ch->cntl0 = (desc_sum & 0xFFU) << 0U;
 
 	/* DMA_CHANNEL_CNTL0_CHVMEMOREQ */
 	fw_ch->cntl0 |= (((uint32_t)ch->vdb_count & 0xFFU) << 8U);

@@ -33,8 +29,7 @@ static void write_dma_channel(struct pva_dma_channel const *ch,
 	fw_ch->cntl0 |= (((uint32_t)ch->prefetch_enable & 1U) << 30U);
 
 	/* DMA_CHANNEL_CNTL1_CHPWT */
-	cntl1_val = (ch->req_per_grant & 0x7U) << 2U;
-	fw_ch->cntl1 = (uint32_t)cntl1_val;
+	fw_ch->cntl1 = (ch->req_per_grant & 0x7U) << 2U;
 
 	/* DMA_CHANNEL_CNTL1_CHVDBSTART */
 	fw_ch->cntl1 |= (((uint32_t)ch->vdb_offset & 0x7FU) << 16U);

@@ -47,8 +42,7 @@ static void write_dma_channel(struct pva_dma_channel const *ch,
 	fw_ch->cntl1 |= (((uint32_t)ch->ch_rep_factor & 0x7U) << 8U);
 
 	/* DMA_CHANNEL_HWSEQCNTL_CHHWSEQSTART */
-	hwseqcntl_val = (ch->hwseq_start & 0x1FFU) << 0U;
-	fw_ch->hwseqcntl = (uint32_t)hwseqcntl_val;
+	fw_ch->hwseqcntl = (ch->hwseq_start & 0x1FFU) << 0U;
 
 	/* DMA_CHANNEL_HWSEQCNTL_CHHWSEQEND */
 	fw_ch->hwseqcntl |= (((uint32_t)ch->hwseq_end & 0x1FFU) << 12U);

@@ -123,19 +117,12 @@ static void write_dma_descriptor(struct pva_dma_descriptor const *desc,
 	/* DMA_DESC_SLP_ADV */
 	fw_desc->slp_adv = desc->src.line_pitch;
 	/* DMA_DESC_DB_START - lower 16 bits, bit 16 stored in cb_ext */
-	/* MISRA C-2023 Rule 10.3: Explicit cast for narrowing conversion */
 	fw_desc->db_start = (uint16_t)(desc->dst.cb_start & 0xFFFFU);
-
 	/* DMA_DESC_DB_SIZE - lower 16 bits, bit 16 stored in cb_ext */
-	/* MISRA C-2023 Rule 10.3: Explicit cast for narrowing conversion */
 	fw_desc->db_size = (uint16_t)(desc->dst.cb_size & 0xFFFFU);
-
 	/* DMA_DESC_SB_START - lower 16 bits, bit 16 stored in cb_ext */
-	/* MISRA C-2023 Rule 10.3: Explicit cast for narrowing conversion */
 	fw_desc->sb_start = (uint16_t)(desc->src.cb_start & 0xFFFFU);
-
 	/* DMA_DESC_SB_SIZE - lower 16 bits, bit 16 stored in cb_ext */
-	/* MISRA C-2023 Rule 10.3: Explicit cast for narrowing conversion */
 	fw_desc->sb_size = (uint16_t)(desc->src.cb_size & 0xFFFFU);
 	/* DMA_DESC_TRIG_CH */
 	/* Channel events are not supported */

@@ -180,7 +167,7 @@ static void write_triggers(struct pva_dma_config const *dma_cfg,
 			   struct pva_dma_config_resource *fw_cfg,
 			   struct pva_dma_resource_map *dma_resource_map)
 {
-	uint8_t i, j;
+	uint32_t i, j;
 	bool trigger_required = false;
 
 	(void)memset(fw_cfg->output_enable, 0, sizeof(fw_cfg->output_enable));

@@ -193,7 +180,7 @@ static void write_triggers(struct pva_dma_config const *dma_cfg,
 		ch = &dma_cfg->channels[i];
 		/* CERT INT31-C: Hardware constraints ensure num_channels and base_channel
 		 * are bounded such that their sum always fits in uint8_t, safe to cast */
-		ch_num = safe_addu8(i, dma_cfg->header.base_channel);
+		ch_num = i + dma_cfg->header.base_channel;
 		mask = ch->output_enable_mask;
 		/* READ/STORE triggers */
 		for (j = 0U; j < 7U; j++) {

@@ -284,7 +271,7 @@ void pva_kmd_write_fw_dma_config(struct pva_dma_config const *dma_cfg,
 
 	/* Do not include fields beyond descriptors as they are not fetched to
 	 * TCM */
-	*out_fw_fetch_size = (uint32_t)offset;
+	*out_fw_fetch_size = offset;
 
 	for (i = 0U; i < hdr->num_channels; i++) {
 		write_dma_channel(&dma_cfg->channels[i],

@@ -294,8 +281,9 @@ void pva_kmd_write_fw_dma_config(struct pva_dma_config const *dma_cfg,
 	}
 
 	for (i = 0U; i < dma_cfg->header.num_descriptors; i++) {
-		if (pva_is_reserved_desc(
-			    safe_addu8(i, dma_cfg->header.base_descriptor))) {
+		uint8_t desc_id =
+			safe_addu8((uint8_t)i, dma_cfg->header.base_descriptor);
+		if (pva_is_reserved_desc(desc_id)) {
 			// skip over the reserved descriptor range
 			i = safe_subu8(PVA_RESERVED_DESCRIPTORS_END,
 				       dma_cfg->header.base_descriptor);

@@ -1302,7 +1302,7 @@ out:
 static enum pva_error validate_rra_mode(struct pva_hwseq_priv *hwseq_info,
 					uint64_t *hw_dma_descs_mask)
 {
-	const uint8_t *column = NULL;
+	const uint8_t *column = 0U;
 	uint32_t i = 0U;
 	uint32_t num_columns = 0U;
 	uint32_t end = hwseq_info->entry.ch->hwseq_end;

@@ -293,6 +293,7 @@ static enum pva_error pva_kmd_op_executable_register_async(
 		}
 	}
 
+	/* CERT INT31-C: exec_size validated to fit in uint32_t, safe to cast */
 	err = add_vpu_resource_and_get_symbols(ctx, exec_data,
 					       (uint32_t)args->exec_size,
 					       &resource_id, &num_symbols,

@@ -616,11 +617,19 @@ pva_kmd_async_ops_handler(struct pva_kmd_context *ctx,
 			err = PVA_INVAL;
 			goto exit_loop;
 		}
+		/* Validate that opcode fits in uint32_t before casting */
+		if (header->opcode > U32_MAX) {
+			pva_kmd_log_err(
+				"pva_kmd_async_ops_handler: Opcode exceeds U32_MAX");
+			err = PVA_INVAL;
+			goto exit_loop;
+		}
 		/*
 		 * Check if the operation is allowed by the DVMS.
 		 * If not, return an error.
 		 */
-		if (!pva_kmd_is_ops_allowed(ctx, header->opcode)) {
+		/* CERT INT31-C: opcode validated to fit in uint32_t, safe to cast */
+		if (!pva_kmd_is_ops_allowed(ctx, (uint32_t)header->opcode)) {
 			err = PVA_NO_PERM;
 			goto exit_loop;
 		}

@@ -680,7 +689,7 @@ pva_kmd_op_context_init(struct pva_kmd_context *ctx, const void *input_buffer,
 	ctx_init_out.max_cmdbuf_chunk_size =
 		pva_kmd_get_max_cmdbuf_chunk_size(ctx->pva);
 
-	produce_data(out_buffer, &ctx_init_out, (uint32_t)sizeof(ctx_init_out));
+	produce_data(out_buffer, &ctx_init_out, sizeof(ctx_init_out));
 
 	return PVA_SUCCESS;
 }

@@ -724,7 +733,7 @@ pva_kmd_op_queue_create(struct pva_kmd_context *ctx, const void *input_buffer,
 
 	/* CERT INT31-C: queue_id validated to fit in uint8_t, safe to cast */
 	syncpt_info = pva_kmd_queue_get_rw_syncpt_info(ctx->pva, ctx->ccq_id,
-						       (uint8_t)queue_id);
+						       queue_id);
 	queue_out_args.error = err;
 	queue_out_args.queue_id = queue_id;
 	queue_out_args.syncpt_id = syncpt_info->syncpt_id;

@@ -733,7 +742,7 @@ pva_kmd_op_queue_create(struct pva_kmd_context *ctx, const void *input_buffer,
 
 out:
 	produce_data(out_buffer, &queue_out_args,
-		     (uint32_t)sizeof(struct pva_ops_response_queue_create));
+		     (uint64_t)sizeof(struct pva_ops_response_queue_create));
 	return PVA_SUCCESS;
 }
 

@@ -762,7 +771,7 @@ pva_kmd_op_queue_destroy(struct pva_kmd_context *ctx, const void *input_buffer,
 	pva_kmd_queue_destroy(ctx, queue_destroy_args->queue_id);
 
 	produce_data(out_buffer, &queue_out_args,
-		     (uint32_t)sizeof(struct pva_ops_response_queue_destroy));
+		     (uint64_t)sizeof(struct pva_ops_response_queue_destroy));
 
 	return PVA_SUCCESS;
 }

@@ -820,7 +829,7 @@ static enum pva_error pva_kmd_op_executable_get_symbols(
 
 	sym_out_args.error = PVA_SUCCESS;
 	sym_out_args.num_symbols = rec->vpu_bin.symbol_table.n_symbols;
-	produce_data(out_buffer, &sym_out_args, (uint32_t)sizeof(sym_out_args));
+	produce_data(out_buffer, &sym_out_args, sizeof(sym_out_args));
 	produce_data(out_buffer, rec->vpu_bin.symbol_table.symbols, table_size);
 	pva_kmd_drop_resource(&ctx->ctx_resource_table,
 			      sym_in_args->exec_resource_id);

@@ -833,7 +842,7 @@ err_drop:
err_response:
 	sym_out_args.error = err;
 	sym_out_args.num_symbols = 0;
-	produce_data(out_buffer, &sym_out_args, (uint32_t)sizeof(sym_out_args));
+	produce_data(out_buffer, &sym_out_args, sizeof(sym_out_args));
 	return PVA_SUCCESS;
 }
 

@@ -901,7 +910,7 @@ pva_kmd_sync_ops_handler(struct pva_kmd_context *ctx,
 
 	header = peek_data(in_arg);
 
-	if (!access_ok(in_arg, (uint32_t)header->size)) {
+	if (!access_ok(in_arg, header->size)) {
 		err = PVA_INVAL;
 		goto out;
 	}

@@ -914,7 +923,7 @@ pva_kmd_sync_ops_handler(struct pva_kmd_context *ctx,
 	}
 
 	input_buffer = consume_data(in_arg, (uint32_t)header->size);
-	input_buffer_size = (uint32_t)header->size;
+	input_buffer_size = (uint64_t)header->size;
 
 	if (input_buffer_size % (uint64_t)sizeof(uint64_t) != 0UL) {
 		pva_kmd_log_err("PVA operation size is not a multiple of 8");

@@ -922,11 +931,18 @@ pva_kmd_sync_ops_handler(struct pva_kmd_context *ctx,
 		goto out;
 	}
 
+	/* Validate that opcode fits in uint32_t before casting */
+	if (header->opcode > U32_MAX) {
+		pva_kmd_log_err("Opcode exceeds U32_MAX");
+		err = PVA_INVAL;
+		goto out;
+	}
 	/*
 	 * Check if the operation is allowed by the DVMS.
 	 * If not, return an error.
 	 */
-	if (!pva_kmd_is_ops_allowed(ctx, header->opcode)) {
+	/* CERT INT31-C: opcode validated to fit in uint32_t, safe to cast */
+	if (!pva_kmd_is_ops_allowed(ctx, (uint32_t)header->opcode)) {
 		err = PVA_NO_PERM;
 		goto out;
 	}

@@ -47,8 +47,7 @@ init_context_resources(struct pva_kmd_cmdbuf_builder *builder,
 	enum pva_error err = PVA_SUCCESS;
 
 	/**Initialize resource table */
-	res_cmd =
-		pva_kmd_reserve_cmd_space(builder, (uint16_t)sizeof(*res_cmd));
+	res_cmd = pva_kmd_reserve_cmd_space(builder, sizeof(*res_cmd));
 	if (res_cmd == NULL) {
 		pva_kmd_log_err(
 			"PVA: Memory alloc for context registration in FW resume command failed\n");

@@ -63,8 +62,7 @@ init_context_resources(struct pva_kmd_cmdbuf_builder *builder,
 		ctx->ctx_resource_table.table_mem->iova,
 		ctx->ctx_resource_table.n_entries, ctx->status_mem->iova);
 
-	queue_cmd = pva_kmd_reserve_cmd_space(builder,
-					      (uint16_t)sizeof(*queue_cmd));
+	queue_cmd = pva_kmd_reserve_cmd_space(builder, sizeof(*queue_cmd));
 	if (queue_cmd == NULL) {
 		pva_kmd_log_err(
 			"PVA: Memory alloc for queue registration in FW resume command failed\n");

@@ -73,8 +71,8 @@ init_context_resources(struct pva_kmd_cmdbuf_builder *builder,
 	}
 
 	/* Initialize shared buffer */
-	shared_buf_cmd = pva_kmd_reserve_cmd_space(
-		builder, (uint16_t)sizeof(*shared_buf_cmd));
+	shared_buf_cmd =
+		pva_kmd_reserve_cmd_space(builder, sizeof(*shared_buf_cmd));
 	if (shared_buf_cmd == NULL) {
 		pva_kmd_log_err(
 			"PVA: Memory alloc for shared buffer registration in FW resume command failed\n");

@@ -136,7 +134,7 @@ static enum pva_error init_context_user_queues(
 			"PVA: Resume queue for context %d, queue %d\n",
 			queue->ccq_id, queue->queue_id);
 		queue_cmd = pva_kmd_reserve_cmd_space(
-			builder, (uint16_t)sizeof(*queue_cmd));
+			builder, sizeof(*queue_cmd));
 		if (queue_cmd == NULL) {
 			pva_kmd_log_err(
 				"PVA: Memory alloc for queue registration in FW resume command failed\n");

@@ -204,8 +202,7 @@ enum pva_error pva_kmd_complete_resume(struct pva_kmd_device *pva)
 		goto err_out;
 	}
 
-	fw_resume = pva_kmd_reserve_cmd_space(&builder,
-					      (uint16_t)sizeof(*fw_resume));
+	fw_resume = pva_kmd_reserve_cmd_space(&builder, sizeof(*fw_resume));
 	if (fw_resume == NULL) {
 		pva_kmd_log_err(
 			"PVA: Memory alloc for FW resume command failed\n");

@@ -78,7 +78,7 @@ enum pva_error pva_kmd_shared_buffer_init(struct pva_kmd_device *pva,
 	/* CERT INT31-C: iova and size validated to fit in uint32_t, safe to cast */
 	pva_kmd_set_cmd_init_shared_dram_buffer(&init_cmd, (uint8_t)interface,
 						device_memory->iova,
-						device_memory->size);
+						(uint32_t)device_memory->size);
 
 	err = pva_kmd_submit_cmd_sync(&pva->submitter, &init_cmd,
 				      (uint32_t)sizeof(init_cmd),

@@ -211,9 +211,7 @@ static void process_res_unreg_msg(struct pva_kmd_device *pva, uint8_t interface,
 	struct pva_kmd_context *ctx = NULL;
 
 	ASSERT(msg_size == sizeof(struct pva_kmd_fw_msg_res_unreg));
-	/* MISRA C-2023 Rule 11.5: msg_body is void*, explicit cast needed for type safety */
-	(void)memcpy(&unreg_data,
-		     (const struct pva_kmd_fw_msg_res_unreg *)msg_body,
+	(void)memcpy((void *)&unreg_data, (const void *)msg_body,
 		     sizeof(unreg_data));
 	ctx = pva_kmd_get_context(pva, interface);
 

@@ -236,8 +234,7 @@ static void shared_buffer_process_msg(struct pva_kmd_device *pva,
 	ASSERT(msg != NULL);
 
 	// Copy the header
-	(void)memcpy(&header, (const struct pva_kmd_fw_buffer_msg_header *)msg,
-		     sizeof(header));
+	(void)memcpy((void *)&header, (const void *)msg, sizeof(header));
 	msg_size = safe_subu32(header.size, (uint32_t)sizeof(header));
 	msg_body = (uint8_t *)msg + sizeof(header);
 

@@ -319,7 +316,8 @@ void pva_kmd_shared_buffer_process(void *pva_dev, uint8_t interface)
 		// Note that ideally this should never happen as the buffer is expected to be
 		// the same size as the resource table.
 		// TODO: abort only the user context, not the device.
-		pva_kmd_abort_fw(pva, PVA_BUF_OUT_OF_RANGE);
+		pva_kmd_abort_fw(pva,
+				 (enum pva_error)PVA_BUF_OUT_OF_RANGE);
 	}
 
 	// Buffer corresponding to CCQ 0 is used for sending messages common to a VM.

@@ -121,10 +121,9 @@ void pva_kmd_config_sid(struct pva_kmd_device *pva)
 	uint32_t offset;
 	uint8_t priv1_sid;
 	uint8_t priv_sid;
-	priv_sid = (uint8_t)(pva->stream_ids[PVA_R5_SMMU_CONTEXT_ID] &
-			     (uint8_t)U8_MAX);
-	priv1_sid = (uint8_t)(pva->stream_ids[pva->r5_image_smmu_context_id] &
-			      (uint8_t)U8_MAX);
+	priv_sid = pva->stream_ids[PVA_R5_SMMU_CONTEXT_ID] & (uint8_t)0xFFU;
+	priv1_sid =
+		pva->stream_ids[pva->r5_image_smmu_context_id] & (uint8_t)0xFFU;
 
 	/* Priv SIDs */
 	if (pva->load_from_gsc) {

@@ -240,19 +240,12 @@ update_exports_symbol(elf_parser_ctx elf,
 	uint32_t symOffset = 0U;
 	enum pva_error err = PVA_SUCCESS;
 	pva_math_error math_err = MATH_OP_SUCCESS;
-	uint32_t symbol_end;
-	uint32_t section_end;
-
-	/* Calculate symbol end address */
-	symbol_end = addu32(symbol_info->vmem_addr,
-			    (uint32_t)SIZE_EXPORTS_TABLE_ENTRY, &math_err);
-	/* Calculate section end address */
-	section_end =
-		addu32(section_header->addr, section_header->size, &math_err);
 
 	if ((section_header == NULL) ||
 	    (symbol_info->vmem_addr < section_header->addr) ||
-	    (symbol_end > section_end)) {
+	    (addu32(symbol_info->vmem_addr, (uint32_t)SIZE_EXPORTS_TABLE_ENTRY,
+		    &math_err) >
+	     addu32(section_header->addr, section_header->size, &math_err))) {
 		err = PVA_INVAL;
 		goto done;
 	} else {

@@ -562,7 +555,7 @@ static void copy_data_section(const elf_parser_ctx elf,
 
 	ASSERT(elf_data != NULL);
 
-	(void)memcpy(dst, (const void *)elf_data, section_header->size);
+	(void)memcpy(dst, elf_data, section_header->size);
 
 	*buffer_offset = safe_addu32(*buffer_offset, aligned_size);
 }

@@ -16,7 +16,7 @@ int pva_kmd_hwpm_ip_reg_op(void *ip_dev, uint32_t reg_op,
 {
 	struct pva_kmd_device *pva = ip_dev;
 
-	if (reg_offset > U32_MAX)
+	if (reg_offset > UINT32_MAX)
 		return PVA_INVAL;
 
 	switch (reg_op) {

@@ -48,14 +48,14 @@ void pva_kmd_hyp_isr(void *data, enum pva_kmd_intr_line intr_line)
 		/* Clear interrupt status */
 		pva_kmd_write(pva, pva->regspec.sec_lic_intr_status, wdt_val);
 		pva_kmd_log_err("PVA watchdog timeout!");
-		pva_kmd_abort_fw(pva, PVA_ERR_WDT_TIMEOUT);
+		pva_kmd_abort_fw(pva, (enum pva_error)PVA_ERR_WDT_TIMEOUT);
 	}
 
 	if (h1x_val != 0U) {
 		pva_kmd_log_err_u64("Host1x errors", h1x_val);
 		/* Clear interrupt status */
 		pva_kmd_write(pva, pva->regspec.sec_lic_intr_status, h1x_val);
-		pva_kmd_abort_fw(pva, PVA_ERR_HOST1X_ERR);
+		pva_kmd_abort_fw(pva, (enum pva_error)PVA_ERR_HOST1X_ERR);
 	}
 
 	if (hsp_val != 0U) {

@@ -236,6 +236,6 @@ static inline uint32_t array_index_nospec(uint32_t index, uint32_t size)
 uint64_t pva_kmd_get_time_tsc(void);
 
 /* Shim function with platform-specific implementations (QNX, Linux, Native) */
-bool pva_kmd_is_ops_allowed(struct pva_kmd_context *ctx, uint64_t opcode);
+bool pva_kmd_is_ops_allowed(struct pva_kmd_context *ctx, uint32_t opcode);
 
 #endif // PVA_KMD_SHIM_UTILS_H

@@ -146,7 +146,7 @@ int pva_kmd_atomic_load(pva_kmd_atomic_t *atomic_val)
 	return atomic_read(atomic_val);
 }
 
-bool pva_kmd_is_ops_allowed(struct pva_kmd_context *ctx, uint64_t opcode)
+bool pva_kmd_is_ops_allowed(struct pva_kmd_context *ctx, uint32_t opcode)
 {
 	return true;
 }

@@ -110,7 +110,7 @@
 /**
  * @brief Macro to set a given bit position in a 32 bit number.
  */
-#define PVA_BIT(_b_) (((uint32_t)1U << (_b_)) & 0xffffffffU)
+#define PVA_BIT(_b_) ((uint32_t)(((uint32_t)1U << (_b_)) & 0xffffffffu))
 
 /**
  * @brief Macro to mask a range(MSB to LSB) of bit positions in a 32 bit number.

@@ -8,8 +8,8 @@
 #include "pva_bit.h"
 
 #define PVA_ROUND_UP(val, align) ((((val) + ((align)-1U)) / (align)) * (align))
-#define PVA_ALIGN4(n) PVA_ROUND_UP(n, 4U)
-#define PVA_ALIGN8(n) PVA_ROUND_UP(n, 8U)
+#define PVA_ALIGN4(n) PVA_ROUND_UP(n, 4)
+#define PVA_ALIGN8(n) PVA_ROUND_UP(n, 8)
 
 static inline uint64_t assemble_addr(uint8_t hi, uint32_t lo)
 {

@@ -9,7 +9,7 @@ extern "C" {
 #endif
 
 /** \brief Specifies the PVA system software commit ID. */
-#define PVA_SYSSW_COMMIT_ID "54b5ea8396784e52556fa0d8a0fbfba7681de793"
+#define PVA_SYSSW_COMMIT_ID "0d38bbb7659432534c6ac5147629876144d3bc0b"
 
 #ifdef __cplusplus
 }