drivers: pva: support PVA on Thor with HV

Jira PVAAS-15366

Change-Id: I74b37d6e2dee09d40b1b64647d8a98a643e23f05
Signed-off-by: omar <onemri@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3160295
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Amruta Sai Anusha Bhamidipati <abhamidipati@nvidia.com>
Author: omar <onemri@nvidia.com>
Date: 2024-05-13 17:17:23 +00:00 (committed by mobile promotions)
Parent: 955de38d70
Commit: 38b4ef0bdd
3 changed files with 102 additions and 38 deletions

File 1 of 3 (path not shown in this view):

@@ -43,7 +43,10 @@ client_context_search_locked(struct platform_device *pdev,
 	if (i >= NVPVA_CLIENT_MAX_CONTEXTS_PER_ENG)
 		return NULL;
 
-	shared_cntxt_dev = i > (NVPVA_CLIENT_MAX_CONTEXTS_PER_ENG - 3);
+	if (dev->version <= PVA_HW_GEN2)
+		shared_cntxt_dev = i > (NVPVA_CLIENT_MAX_CONTEXTS_PER_ENG - 3);
+	else
+		shared_cntxt_dev = false;
 
 	c_node->pid = pid;
 	c_node->pva = dev;
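Note: condensed, the new rule reads: on GEN2 and earlier the top two context slots resolve to shared context devices, while on GEN3 (Thor) every client gets a dedicated context device. A minimal userspace sketch of just that predicate (the constant's value is assumed for illustration; it is not part of this diff):

    #include <stdbool.h>

    /* Value assumed for illustration only. */
    #define NVPVA_CLIENT_MAX_CONTEXTS_PER_ENG 9

    /* Mirrors the selection above: the last two slots are shared on
     * GEN2 and earlier; GEN3 never hands out a shared context device. */
    static bool is_shared_cntxt_slot(bool gen2_or_older, unsigned int i)
    {
            if (gen2_or_older)
                    return i > (NVPVA_CLIENT_MAX_CONTEXTS_PER_ENG - 3);
            return false;
    }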

File 2 of 3 (path not shown in this view):

@@ -362,7 +362,7 @@ static int pva_init_fw(struct platform_device *pdev)
 		      cfg_priv_ar1_end_r(pva->version),
 		      FW_CODE_DATA_END_ADDR);
 	useg_addr = priv1_buffer->pa - FW_CODE_DATA_START_ADDR;
-	if (pva->is_hv_mode) {
+	if ((pva->is_hv_mode) && (!pva->boot_from_file)) {
 		host1x_writel(pdev,
 			      cfg_priv_ar1_lsegreg_r(pva->version),
 			      0xFFFFFFFF);
@@ -390,7 +390,6 @@ static int pva_init_fw(struct platform_device *pdev)
 		host1x_writel(pdev, cfg_scr_priv_0_r(), PVA_PRIV_SCR_VAL | PVA_LOCK_SCR);
 		host1x_writel(pdev, cfg_scr_ccq_ctrl_r(), PVA_CCQ_SCR_VAL | PVA_LOCK_SCR);
 	}
-	}
 
 	/* Indicate the OS is waiting for PVA ready Interrupt */
@@ -816,7 +815,8 @@ static int nvpva_write_hwid(struct platform_device *pdev)
 	/* Go through the StreamIDs and assemble register values */
 	for (i = 0; i < ARRAY_SIZE(pdata->vm_regs); i++) {
 		u64 addr = pdata->vm_regs[i].addr;
-		u32 shift = pdata->vm_regs[i].shift;
+		u32 shift = pdata->vm_regs[i].shift & 0x0000FFFF;
+		u32 mask = (pdata->vm_regs[i].shift >> 16) & 0x0000FFFF;
 		u32 val;
 
 		/* Break if this was the last StreamID */
@@ -824,7 +824,10 @@ static int nvpva_write_hwid(struct platform_device *pdev)
 			break;
 
 		/* Update the StreamID value */
-		val = ((streamids[id_idx[i]] & 0x000000FF) << shift);
+		if(mask == 0 )
+			mask = 0x000000FF;
+
+		val = ((streamids[id_idx[i]] & mask) << shift);
 		reg_array[reg_idx[i]] |= val;
 	}
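Note: the vm_regs 'shift' field is now a packed pair: bits [15:0] carry the bit offset and bits [31:16] carry an optional StreamID mask, with zero falling back to the legacy 0xFF. A minimal userspace sketch of the decode (pack_streamid and the sample values are hypothetical):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the decode above: low half = shift, high half = mask,
     * and a zero mask falls back to the legacy 0xFF. */
    static uint32_t pack_streamid(uint32_t shift_field, uint32_t streamid)
    {
            uint32_t shift = shift_field & 0x0000FFFF;
            uint32_t mask = (shift_field >> 16) & 0x0000FFFF;

            if (mask == 0)
                    mask = 0x000000FF;

            return (streamid & mask) << shift;
    }

    int main(void)
    {
            /* Legacy entry: shift = 8, no mask encoded -> 0xFF assumed. */
            printf("0x%08" PRIX32 "\n", pack_streamid(0x00000008, 0x1234)); /* 0x00003400 */
            /* New-style entry: shift = 8, 10-bit mask 0x3FF in the top half. */
            printf("0x%08" PRIX32 "\n", pack_streamid(0x03FF0008, 0x1234)); /* 0x00023400 */
            return 0;
    }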
@@ -1274,13 +1277,20 @@ static int pva_probe(struct platform_device *pdev)
 	else
 		pva->map_co_needed = true;
 
 #ifdef CONFIG_PVA_CO_DISABLED
 	pva->boot_from_file = true;
 #else
-	if ((pdata->version == PVA_HW_GEN1) || (pdata->version == PVA_HW_GEN3))
+	if (pdata->version == PVA_HW_GEN1)
 		pva->boot_from_file = true;
 	else
 		pva->boot_from_file = false;
 #endif
+
+#if defined(CONFIG_PVA_CO_DISABLED_T264)
+	if (pdata->version == PVA_HW_GEN3)
+		pva->boot_from_file = true;
+#endif
+
+#if defined(CONFIG_PVA_CO_DISABLED)
+	if (pdata->version == PVA_HW_GEN2)
+		pva->boot_from_file = true;
+#endif
 
 #ifdef __linux__
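Note: one reading of the resulting firmware-source selection, condensed into a userspace sketch (the enum values and the booleans standing in for CONFIG_PVA_CO_DISABLED / CONFIG_PVA_CO_DISABLED_T264 are illustrative, not driver code):

    #include <stdbool.h>

    /* Generation values assumed for illustration only. */
    enum pva_hw_gen { PVA_HW_GEN1 = 1, PVA_HW_GEN2 = 2, PVA_HW_GEN3 = 3 };

    /* GEN1 always boots from file; GEN2/GEN3 boot from the carveout
     * unless the matching CO_DISABLED config forces a file boot. */
    static bool pva_boots_from_file(enum pva_hw_gen gen,
                                    bool co_disabled, bool co_disabled_t264)
    {
            if (co_disabled)
                    return true;
            if (gen == PVA_HW_GEN1)
                    return true;
            if (gen == PVA_HW_GEN3 && co_disabled_t264)
                    return true;
            return false;
    }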

File 3 of 3 (path not shown in this view):

@@ -26,7 +26,6 @@
 #define NVPVA_CNTXT_DEVICE_CNT (8U)
 
 #ifdef CONFIG_TEGRA_T26X_GRHOST_PVA
-#include "pva_cntxt_dev_name_t264.h"
 #include "pva_iommu_context_dev_t264.h"
 #else
 #define NVPVA_CNTXT_DEV_NAME_LEN NVPVA_CNTXT_DEV_NAME_LEN_T23X
@@ -43,13 +42,43 @@ static char *dev_names[] = {
 	"pva0_niso1_ctx5",
 	"pva0_niso1_ctx6",
 	"pva0_niso1_ctx7",
-#ifdef CONFIG_TEGRA_T26X_GRHOST_PVA
-	PVA_CNTXT_DEV_NAME_T264
-#endif
+	"pva0_niso1_ctx8",
 };
 
-static const struct of_device_id pva_iommu_context_dev_of_match[] = {
-	{.compatible = "nvidia,pva-tegra186-iommu-context"},
+#define PVA_HW_DONT_CARE (0)
+
+struct nvpva_ctx_device_data {
+	u32 version;
+	u32 pva_cntxt_dev_cnt;
+	u32 pva_cntxt_dev_name_len;
+	u32 aux_dev_idx;
+};
+
+static struct nvpva_ctx_device_data told_ctx_dev_info = {
+	.version = PVA_HW_GEN2,
+	.pva_cntxt_dev_cnt = 8,
+	.pva_cntxt_dev_name_len = 29,
+	.aux_dev_idx = 7,
+};
+
+static struct nvpva_ctx_device_data t264_ctx_dev_info = {
+	.version = PVA_HW_GEN3,
+	.pva_cntxt_dev_cnt = 9,
+	.pva_cntxt_dev_name_len = 31,
+	.aux_dev_idx = 8,
+};
+
+static u32 pva_cntxt_dev_cnt[4] = {0, 0, 8, 9};
+
+static struct of_device_id pva_iommu_context_dev_of_match[] = {
+	{
+		.compatible = "nvidia,pva-tegra186-iommu-context",
+		.data = (struct nvpva_ctx_device_data *)&told_ctx_dev_info
+	},
+	{
+		.compatible = "nvidia,pva-tegra264-iommu-context",
+		.data = (struct nvpva_ctx_device_data *)&t264_ctx_dev_info
+	},
 	{},
 };
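Note: carrying per-SoC parameters in of_device_id.data is the standard pattern; probe() below fetches it via of_match_device() and match->data. For reference, of_device_get_match_data() collapses that into a single call; a sketch of an equivalent helper (pva_iommu_ctx_get_pdata is hypothetical):

    #include <linux/of_device.h>

    /* Equivalent lookup to the of_match_device() + match->data sequence
     * used in probe() below, via the single-call helper. */
    static int pva_iommu_ctx_get_pdata(struct device *dev,
                                       const struct nvpva_ctx_device_data **out)
    {
            const struct nvpva_ctx_device_data *pdata;

            pdata = of_device_get_match_data(dev);
            if (!pdata)
                    return -ENODATA;

            *out = pdata;
            return 0;
    }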
@@ -64,12 +93,13 @@ struct pva_iommu_ctx {
 static LIST_HEAD(pva_iommu_ctx_list);
 static DEFINE_MUTEX(pva_iommu_ctx_list_mutex);
+static u32 pva_cntxt_dev_name_len = 0;
+static u32 aux_dev_idx = 0;
+static u32 aux_dev_idex_name_len = 0;
 
 bool is_cntxt_initialized(const int hw_gen)
 {
-	u32 pva_cntxt_dev_cnt = (hw_gen == PVA_HW_GEN3) ? NVPVA_CNTXT_DEVICE_CNT_T264
-							: NVPVA_CNTXT_DEVICE_CNT;
-
-	return (cntxt_dev_count == pva_cntxt_dev_cnt);
+	return (cntxt_dev_count == pva_cntxt_dev_cnt[hw_gen]);
 }
 
 int nvpva_iommu_context_dev_get_sids(int *hwids, int *count, const int hw_gen)
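Note: pva_cntxt_dev_cnt[] is indexed directly by hardware generation, which relies on PVA_HW_GEN2 == 2 and PVA_HW_GEN3 == 3; there is no bounds check, and GEN1 (index 1) yields a count of 0. A userspace sketch of the invariant (generation values assumed; the enum is not part of this diff):

    #include <assert.h>
    #include <stdint.h>

    /* Generation values assumed for illustration. */
    enum { PVA_HW_GEN1 = 1, PVA_HW_GEN2 = 2, PVA_HW_GEN3 = 3 };

    static const uint32_t pva_cntxt_dev_cnt[4] = {0, 0, 8, 9};

    int main(void)
    {
            /* T23x (GEN2) exposes 8 context devices, T264 (GEN3) exposes 9. */
            assert(pva_cntxt_dev_cnt[PVA_HW_GEN2] == 8);
            assert(pva_cntxt_dev_cnt[PVA_HW_GEN3] == 9);
            return 0;
    }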
@@ -77,19 +107,14 @@ int nvpva_iommu_context_dev_get_sids(int *hwids, int *count, const int hw_gen)
 	struct pva_iommu_ctx *ctx;
 	int err = 0;
 	int i;
-	u32 pva_cntxt_dev_cnt;
-
-	if (hw_gen == PVA_HW_GEN3)
-		pva_cntxt_dev_cnt = NVPVA_CNTXT_DEVICE_CNT_T264;
-	else
-		pva_cntxt_dev_cnt = NVPVA_CNTXT_DEVICE_CNT;
 
 	*count = 0;
 	mutex_lock(&pva_iommu_ctx_list_mutex);
-	for (i = 0; i < pva_cntxt_dev_cnt; i++) {
+	for (i = 0; i < pva_cntxt_dev_cnt[hw_gen]; i++) {
 		list_for_each_entry(ctx, &pva_iommu_ctx_list, list) {
 			if (strnstr(ctx->pdev->name, dev_names[i],
-				    NVPVA_CNTXT_DEV_NAME_LEN) != NULL) {
+				    pva_cntxt_dev_name_len) != NULL) {
 				hwids[*count] = nvpva_get_device_hwid(ctx->pdev, 0);
 				if (hwids[*count] < 0) {
 					err = hwids[*count];
@@ -97,7 +122,7 @@ int nvpva_iommu_context_dev_get_sids(int *hwids, int *count, const int hw_gen)
 				}
 
 				++(*count);
-				if (*count >= pva_cntxt_dev_cnt)
+				if (*count >= pva_cntxt_dev_cnt[hw_gen])
 					break;
 			}
 		}
@@ -178,24 +203,47 @@ void nvpva_iommu_context_dev_release(struct platform_device *pdev)
 static int pva_iommu_context_dev_probe(struct platform_device *pdev)
 {
 	struct pva_iommu_ctx *ctx;
+	struct device *dev = &pdev->dev;
+	struct nvpva_ctx_device_data *pdata;
+	const struct of_device_id *match;
+	int err = 0;
 
-	if (!iommu_get_domain_for_dev(&pdev->dev)) {
-		dev_err(&pdev->dev,
+	match = of_match_device(pva_iommu_context_dev_of_match, dev);
+	if (!match) {
+		dev_err(dev, "no match for pva ctx dev dev\n");
+		err = -ENODATA;
+		goto err_get_pdata;
+	}
+
+	pdata = (struct nvpva_ctx_device_data *)match->data;
+	WARN_ON(!pdata);
+	if (!pdata) {
+		dev_info(dev, "no platform data\n");
+		err = -ENODATA;
+		goto err_get_pdata;
+	}
+
+	aux_dev_idx = pdata->aux_dev_idx;
+	aux_dev_idex_name_len = pdata->pva_cntxt_dev_name_len;
+	pva_cntxt_dev_name_len = pdata->pva_cntxt_dev_name_len;
+
+	if (!iommu_get_domain_for_dev(dev)) {
+		dev_err(dev,
 			"iommu is not enabled for context device. aborting.");
 		return -ENOSYS;
 	}
 
-	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
 	if (!ctx) {
-		dev_err(&pdev->dev,
+		dev_err(dev,
 			"%s: could not allocate iommu ctx\n", __func__);
 		return -ENOMEM;
 	}
 
-	if (strnstr(pdev->name, dev_names[7], NVPVA_CNTXT_DEV_NAME_LEN) != NULL)
-		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (strnstr(pdev->name, dev_names[aux_dev_idx], aux_dev_idex_name_len ) != NULL)
+		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 	else
-		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
+		dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
 
 	INIT_LIST_HEAD(&ctx->list);
 	ctx->pdev = pdev;
@@ -208,7 +256,7 @@ static int pva_iommu_context_dev_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, ctx);
 
 	pdev->dev.dma_parms = &ctx->dma_parms;
-	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+	dma_set_max_seg_size(dev, UINT_MAX);
 
 #ifdef CONFIG_NVMAP
 	/* flag required to handle stashings in context devices */
@@ -216,14 +264,17 @@ static int pva_iommu_context_dev_probe(struct platform_device *pdev)
 #endif
 
 #if LINUX_VERSION_CODE > KERNEL_VERSION(5, 0, 0)
-	dev_info(&pdev->dev, "initialized (streamid=%d, iommu=%s)",
+	dev_info(dev, "initialized (streamid=%d, iommu=%s)",
 		 nvpva_get_device_hwid(pdev, 0),
 		 dev_name(pdev->dev.iommu->iommu_dev->dev));
 #else
-	dev_info(&pdev->dev, "initialized (streamid=%d)",
+	dev_info(dev, "initialized (streamid=%d)",
		 nvpva_get_device_hwid(pdev, 0));
 #endif
 	return 0;
+
+err_get_pdata:
+	return err;
 }
 
 static int __exit pva_iommu_context_dev_remove(struct platform_device *pdev)