kmd: Update PVA SCR values in standard build

- MB2 cannot program SCR values because PVA is powered off
- KMD cannot access these registers because they are not mapped for VM
access
- Hypervisor programs these registers, but KMD needs to trap for the PVA
reset use case (see the sketch below)
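
In the standard (non-virtualized) build this change therefore lets KMD program
the firewalls itself once PVA is powered on. A minimal sketch of that path,
using the helpers and macros added by this change (the wrapper function name
is illustrative only, not part of the commit):

    /* Sketch only: mirrors the pva_init_fw() hunk at the end of this change.
     * host1x_writel(), the *_scr*_r() offset helpers and the PVA_*_SCR_VAL
     * macros are introduced/used by this commit; pva_program_scr() itself is
     * a hypothetical wrapper for illustration.
     */
    static void pva_program_scr(struct platform_device *pdev)
    {
            /* EVP SCR: CCPLEX read/write only */
            host1x_writel(pdev, evp_scr_r(), PVA_EVP_SCR_VAL);

            /* Status/control SCR: CCPLEX read, R5 read/write */
            host1x_writel(pdev, cfg_scr_status_ctrl_r(), PVA_STATUS_CTL_SCR_VAL);

            /* PRIV SCR: CCPLEX and R5 read/write */
            host1x_writel(pdev, cfg_scr_priv_0_r(), PVA_PRIV_SCR_VAL);

            /* CCQ SCR: CCPLEX write, R5 read */
            host1x_writel(pdev, cfg_scr_ccq_ctrl_r(), PVA_CCQ_SCR_VAL);

            /* In the virtualized build these offsets are not mapped to the VM,
             * so the same writes trap and the Hypervisor applies the values. */
    }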

Bug 4450663

Change-Id: I9e6bfdfbc09650a2b7fea0e7a10702a971bc38f4
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3059459
Reviewed-by: Bhushan Patil <bhushanp@nvidia.com>
Reviewed-by: Karthik Srirangapatna Maheshwarappa <kmaheshwarap@nvidia.com>
Reviewed-by: Mohnish Jain <mohnishj@nvidia.com>
Tested-by: Karthik Srirangapatna Maheshwarappa <kmaheshwarap@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
(cherry picked from commit 2880d777f303602f72b7a7d1a6348d8272aa8238)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3059488
Reviewed-by: Sai Vishal Pothula <spothula@nvidia.com>
Author: Karthik SM
Date: 2024-01-22 12:31:19 +00:00
Committed by: mobile promotions
Parent: 5713bc51b0
Commit: d4443f6037
4 changed files with 60 additions and 5 deletions

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023, NVIDIA Corporation. All rights reserved.
* Copyright (c) 2022-2024, NVIDIA Corporation. All rights reserved.
*/
#ifndef PVA_FW_ADDRESS_MAP_H
@@ -114,4 +114,31 @@
*/
#define FW_SHARED_MEMORY_START 2147483648 //0x80000000
/**
* @defgroup PVA_HYP_SCR_VALUES
*
* @brief Following macros specify SCR firewall values that are expected to be
* programmed by Hypervisor.
* @{
*/
/**
* @brief EVP SCR firewall to enable only CCPLEX read/write access.
*/
#define PVA_EVP_SCR_VAL 0x19000202
/**
* @brief PRIV SCR firewall to enable only CCPLEX and R5 read/write access.
*/
#define PVA_PRIV_SCR_VAL 0x1F008282
/**
* @brief CCQ SCR firewall to enable only CCPLEX write access and R5 read access.
*/
#define PVA_CCQ_SCR_VAL 0x19000280
/**
* @brief Status control SCR firewall to enable only CCPLEX read access and R5 read/write access.
*/
#define PVA_STATUS_CTL_SCR_VAL 0x1F008082
/** @} */
#endif

@@ -1,7 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved.
*
* Copyright (c) 2019-2024, NVIDIA CORPORATION. All rights reserved.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
@@ -107,6 +109,21 @@ static inline u32 v2_cfg_priv_ar2_end_r(void)
return V2_ADDRESS_CONTROL_BASE + 0x34U;
}
static inline u32 cfg_scr_status_ctrl_r(void)
{
return (V2_ADDRESS_CONTROL_BASE + 0x8000U);
}
static inline u32 cfg_scr_priv_0_r(void)
{
return (V2_ADDRESS_CONTROL_BASE + 0x8008U);
}
static inline u32 cfg_scr_ccq_ctrl_r(void)
{
return (V2_ADDRESS_CONTROL_BASE + 0x8010U);
}
#define V2_CFG_CCQ_BASE 0x260000U
#define V2_CFG_CCQ_SIZE 0x010000U

@@ -1,7 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2023, NVIDIA CORPORATION. All rights reserved.
*
* Copyright (c) 2016-2024, NVIDIA CORPORATION. All rights reserved.
*/
/*
* Function naming determines intended use:
*
* <x>_r(void) : Returns the offset for register <x>.
@@ -70,4 +72,8 @@ static inline u32 evp_fiq_addr_r(void)
{
return 0x3c;
}
static inline u32 evp_scr_r(void)
{
return 0x40U;
}
#endif

@@ -366,6 +366,11 @@ static int pva_init_fw(struct platform_device *pdev)
host1x_writel(pdev,
cfg_priv_ar1_usegreg_r(pva->version),
0xFFFFFFFF);
host1x_writel(pdev, evp_scr_r(), PVA_EVP_SCR_VAL);
host1x_writel(pdev, cfg_scr_status_ctrl_r(), PVA_STATUS_CTL_SCR_VAL);
host1x_writel(pdev, cfg_scr_priv_0_r(), PVA_PRIV_SCR_VAL);
host1x_writel(pdev, cfg_scr_ccq_ctrl_r(), PVA_CCQ_SCR_VAL);
} else {
host1x_writel(pdev,
cfg_priv_ar1_lsegreg_r(pva->version),