drivers: pva: t26x: Fix HWSEQ RRA validation check

The HWSEQ buffer is validated for each frame, for all addressing modes,
while verifying the hwseq blob, because on t26x multiple frames with
different modes can be linked in HW to describe a use case.

Since that common check already covers every frame, remove the
RRA-specific loop that re-validated all the frames here.
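
For illustration, a minimal sketch of the shape of that common
per-frame walk, which makes a mode-specific frame loop redundant.
The types and helper names here are hypothetical stand-ins, not the
driver's actual API:

#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for a hwseq frame header. */
struct frame_header {
	uint8_t mode;        /* addressing mode of this frame */
	uint8_t num_columns; /* entries that follow this header */
};

/* Mode-agnostic per-frame check; mode-specific rules hook in here. */
static int validate_frame(const struct frame_header *fh)
{
	return (fh->num_columns > 0U) ? 0 : -1;
}

/* Common walk over all frames linked in the blob: every frame is
 * validated exactly once, whatever its addressing mode, so a caller
 * such as the RRA path only needs to validate the current frame. */
static int validate_blob(const uint8_t *blob, size_t size,
			 size_t frame_size, uint32_t frame_count)
{
	uint32_t f;

	for (f = 0U; f < frame_count; f++) {
		size_t off = (size_t)f * frame_size;

		if ((off + frame_size) > size)
			return -1; /* frame would run past the blob */
		if (validate_frame((const struct frame_header *)(blob + off)) != 0)
			return -1;
	}

	return 0;
}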

Bug 4588253

Signed-off-by: Amruta Bhamidipati <abhamidipati@nvidia.com>

Change-Id: Ib4c386dccca6554bbf4bace046ed182b5910da00
Reviewed-on: https://git-master.nvidia.com/r/c/linux-t264/+/3153300
Reviewed-by: Sreehari Mohan <sreeharim@nvidia.com>
Tested-by: Amruta Sai Anusha Bhamidipati <abhamidipati@nvidia.com>
Reviewed-by: Krish Agarwal <krisha@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Omar Nemri <onemri@nvidia.com>
Author: Amruta Bhamidipati
Date: 2024-01-18 06:11:17 +00:00
Committed-by: Jon Hunter
Parent: 1a030099f0
Commit: ee9b187f9a

2 changed files with 33 additions and 46 deletions


@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+ * SPDX-License-Identifier: GPL-2.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -37,6 +38,13 @@
  * R5 FW reserves one DMA channel for internal use.
  */
 #define NVPVA_TASK_MAX_DMA_CHANNELS_T26X	(15U)
+
+/**
+ * Maximum number of frames allowed in hwseq mode
+ * on T26x is 64.
+ */
+#define NVPVA_TASK_MAX_HWSEQ_FRAME_COUNT_T26X	(64U)
+
 /* NOTE: This is a re-definition of nvpva_dma_channel that
  * contains T26x specific changes. Once T26x is public,
  * this definition may be merged nvpva_dma_channel.
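
The hunk above adds NVPVA_TASK_MAX_HWSEQ_FRAME_COUNT_T26X, but its use
site is outside the shown hunks. A minimal sketch of the kind of bound
check it enables; check_hwseq_frame_count is a hypothetical helper, and
pr_err is stubbed for user space:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define NVPVA_TASK_MAX_HWSEQ_FRAME_COUNT_T26X	(64U)
#define pr_err(...) fprintf(stderr, __VA_ARGS__)

/* Hypothetical helper: reject a task whose hwseq frame count exceeds
 * the T26x limit of 64 frames. */
static int check_hwseq_frame_count(uint32_t frame_count)
{
	if (frame_count > NVPVA_TASK_MAX_HWSEQ_FRAME_COUNT_T26X) {
		pr_err("Invalid HWSEQ frame count: %u\n", frame_count);
		return -EINVAL;
	}

	return 0;
}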


@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
+ * SPDX-License-Identifier: GPL-2.0-only
+ * SPDX-FileCopyrightText: Copyright (c) 2023-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -20,8 +21,7 @@
 #define PVA_HWSEQ_RAM_SIZE_T26X		2048U
 #define PVA_HWSEQ_RAM_ID_MASK_T26X	0x1FFU
 #define PVA_HWSEQ_RRA_ADDR		0xC0DAU
-#define PVA_HWSEQ_RRA_MAX_NOCR		31U
-#define PVA_HWSEQ_RRA_MAX_FRAME_COUNT	63U
+#define PVA_HWSEQ_MAX_CR_COUNT_T26X	32U
 
 /** \brief Mask used to derive the MSB for HW sequencer
  * buffer start index for a channel
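
The rename from PVA_HWSEQ_RRA_MAX_NOCR (31U) to
PVA_HWSEQ_MAX_CR_COUNT_T26X (32U) shifts the bound from the raw no_cr
field to the derived column count; the new macro's use site is not in
the shown hunks, but the presumable relationship is sketched below
(check_column_count is a hypothetical illustration):

#include <errno.h>
#include <stdint.h>

#define PVA_HWSEQ_MAX_CR_COUNT_T26X	32U

/* Hypothetical check: no_cr encodes columns minus one, so the derived
 * count no_cr + 1 is bounded by 32, matching the old no_cr <= 31. */
static int check_column_count(uint8_t no_cr)
{
	uint32_t num_columns = (uint32_t)no_cr + 1U;

	return (num_columns <= PVA_HWSEQ_MAX_CR_COUNT_T26X) ? 0 : -EINVAL;
}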
@@ -89,7 +89,6 @@ static int validate_rra_mode(struct pva_hw_sweq_blob_s *blob,
 	const u8 *desc_entry = NULL;
 	const u8 *column = 0U;
 	uint32_t i = 0U;
-	uint32_t f = 0U;
 	uint32_t num_columns = 0U;
 	u32 end = nvpva_get_hwseq_end_idx_t26x(dma_ch) * 4U;
 	u8 *blob_end = &((uint8_t *)blob)[end + 4];
@@ -102,52 +101,32 @@ static int validate_rra_mode(struct pva_hw_sweq_blob_s *blob,
 		return -EINVAL;
 	}
 
-	if (dma_ch->hwseqFrameCount > PVA_HWSEQ_RRA_MAX_FRAME_COUNT) {
-		pr_err("Invalid HWSEQ frame count");
+	if (blob->f_header.fr != 0) {
+		pr_err("Invalid HWSEQ repetition factor");
 		return -EINVAL;
 	}
 
-	// Validate each frame contained in the HW Seq blob
-	for (f = 0; f < dma_ch->hwseqFrameCount + 1; f++)
-	{
-		if (blob->f_header.no_cr > PVA_HWSEQ_RRA_MAX_NOCR) {
-			pr_err("Invalid HWSEQ column count. NOCR = %u",
-			       blob->f_header.no_cr);
-			return -EINVAL;
-		}
-
-		if (blob->f_header.fr != 0) {
-			pr_err("Invalid HWSEQ repetition factor");
-			return -EINVAL;
-		}
-
-		num_columns = blob->f_header.no_cr + 1U;
-		column = (u8 *)&blob->cr_header;
-		desc_entry = (u8 *)&blob->desc_header;
-
-		// Ensure there are sufficient CRO and Desc ID entries
-		// in the HWSEQ blob
-		if (((blob_end - column) / column_entry_size) < num_columns) {
-			pr_err("HWSEQ Program does not have enough columns.");
-			return -EINVAL;
-		}
-
-		for (i = 0U; i < num_columns; i++) {
-			// In RRA mode, each HWSEQ column has only 1 descriptor
-			// Hence, we validate the first descriptor and ignore
-			// the second descriptor in each column
-			if ((*desc_entry == 0U) || (*desc_entry >
-				(NVPVA_TASK_MAX_DMA_DESCRIPTOR_ID_T26X))) {
-				return -EINVAL;
-			}
-			set_hwseq_mode_rra(task, *desc_entry -1U);
-			desc_entry += column_entry_size;
-		}
-		// Move blob pointer to the start of next frame
-		blob = (struct pva_hw_sweq_blob_s *)((u8 *)blob
-			+ sizeof(struct pva_hw_sweq_blob_s)
-			+ ((num_columns - 1U) * column_entry_size));
+	num_columns = blob->f_header.no_cr + 1U;
+	column = (u8 *)&blob->cr_header;
+	desc_entry = (u8 *)&blob->desc_header;
+
+	// Ensure there are sufficient CRO and Desc ID entries
+	// in the HWSEQ blob
+	if (((blob_end - column) / column_entry_size) < num_columns) {
+		pr_err("HWSEQ Program does not have enough columns.");
+		return -EINVAL;
+	}
+
+	for (i = 0U; i < num_columns; i++) {
+		// In RRA mode, each HWSEQ column has only 1 descriptor
+		// Hence, we validate the first descriptor and ignore
+		// the second descriptor in each column
+		if ((*desc_entry == 0U) || (*desc_entry >
+			(NVPVA_TASK_MAX_DMA_DESCRIPTOR_ID_T26X))) {
+			return -EINVAL;
+		}
+		set_hwseq_mode_rra(task, *desc_entry -1U);
+		desc_entry += column_entry_size;
 	}
 
 	return 0;
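
Putting the surviving right-hand lines together, the post-patch RRA
validation checks exactly one frame: the repetition factor must be
zero, the blob must hold num_columns column entries, and each column's
first descriptor ID must be in range. A condensed, self-contained
sketch of that flow; the struct layouts, macro value, and
set_hwseq_mode_rra body are stubbed assumptions, only the field and
helper names follow the diff:

#include <errno.h>
#include <stdint.h>

typedef uint8_t u8;

/* Stubbed stand-ins for the driver's types (assumptions). */
#define NVPVA_TASK_MAX_DMA_DESCRIPTOR_ID_T26X	64U

struct frame_hdr { uint8_t fr; uint8_t no_cr; };
struct pva_hw_sweq_blob_s {
	struct frame_hdr f_header;
	uint16_t cr_header;
	uint16_t desc_header;
};
struct pva_submit_task { uint64_t hwseq_rra_mask; };

static void set_hwseq_mode_rra(struct pva_submit_task *task, uint8_t desc_idx)
{
	task->hwseq_rra_mask |= 1ULL << desc_idx; /* illustrative only */
}

/* Condensed post-patch flow: validate a single frame, since the common
 * blob verification already walks every linked frame. */
static int validate_rra_mode_condensed(struct pva_hw_sweq_blob_s *blob,
				       const u8 *blob_end,
				       uint32_t column_entry_size,
				       struct pva_submit_task *task)
{
	const u8 *column = (const u8 *)&blob->cr_header;
	const u8 *desc_entry = (const u8 *)&blob->desc_header;
	uint32_t num_columns, i;

	/* RRA mode forbids frame repetition. */
	if (blob->f_header.fr != 0)
		return -EINVAL;

	/* no_cr is zero-based: the frame carries no_cr + 1 columns. */
	num_columns = (uint32_t)blob->f_header.no_cr + 1U;

	/* The blob must actually contain that many column entries. */
	if ((uint32_t)((blob_end - column) / column_entry_size) < num_columns)
		return -EINVAL;

	/* Each RRA column holds one descriptor; its ID must be valid. */
	for (i = 0U; i < num_columns; i++) {
		if (*desc_entry == 0U ||
		    *desc_entry > NVPVA_TASK_MAX_DMA_DESCRIPTOR_ID_T26X)
			return -EINVAL;
		set_hwseq_mode_rra(task, (uint8_t)(*desc_entry - 1U));
		desc_entry += column_entry_size;
	}

	return 0;
}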