tegra: hwpm: create memory buffer structures

Stream and allowlist buffers are allocated by the user as dma buffers
and mapped in virtual address space by the driver. The DMA and mapping
functions are linux specific. Hence create memory management and
allowlist linux structures. Add these linux memory structure pointers in
the tegra_hwpm parent structure.

Jira THWPM-60

Change-Id: I2526f2bab835df4c5a922b0b375c22a6247aad30
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2729664
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Vedashree Vidwans
2022-06-15 22:08:47 -07:00
committed by mobile promotions
parent 378bd9bb1c
commit 486ec4a24c
13 changed files with 256 additions and 147 deletions

View File

@@ -15,13 +15,14 @@
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_kmem.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_mem_mgmt.h>
#include <tegra_hwpm_static_analysis.h>
int tegra_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm)
{
int ret = 0;
hwpm->full_alist_size = 0ULL;
hwpm->alist_map->full_alist_size = 0ULL;
tegra_hwpm_fn(hwpm, " ");
@@ -51,11 +52,12 @@ int tegra_hwpm_combine_alist(struct tegra_soc_hwpm *hwpm, u64 *alist)
return err;
}
/* Check size of full alist with hwpm->full_alist_size*/
if (func_args.full_alist_idx != hwpm->full_alist_size) {
/* Check size of full alist with hwpm->alist_map->full_alist_size*/
if (func_args.full_alist_idx != hwpm->alist_map->full_alist_size) {
tegra_hwpm_err(hwpm, "full_alist_size 0x%llx doesn't match "
"max full_alist_idx 0x%llx",
hwpm->full_alist_size, func_args.full_alist_idx);
hwpm->alist_map->full_alist_size,
func_args.full_alist_idx);
err = -EINVAL;
}

View File

@@ -12,6 +12,7 @@
*/
#include <tegra_hwpm_static_analysis.h>
#include <tegra_hwpm_mem_mgmt.h>
#include <tegra_hwpm_aperture.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_kmem.h>
@@ -269,8 +270,10 @@ static int tegra_hwpm_func_single_element(struct tegra_soc_hwpm *hwpm,
return 0;
}
if (element->alist) {
hwpm->full_alist_size = tegra_hwpm_safe_add_u64(
hwpm->full_alist_size, element->alist_size);
hwpm->alist_map->full_alist_size =
tegra_hwpm_safe_add_u64(
hwpm->alist_map->full_alist_size,
element->alist_size);
} else {
tegra_hwpm_err(hwpm, "IP %d"
" element type %d static_idx %d NULL alist",

View File

@@ -11,12 +11,13 @@
* more details.
*/
#include <tegra_hwpm_mem_mgmt.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_kmem.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm_ip.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_common.h>
#include <hal/t234/t234_init.h>
#ifdef CONFIG_TEGRA_NEXT1_HWPM
@@ -139,7 +140,7 @@ int tegra_hwpm_setup_sw(struct tegra_soc_hwpm *hwpm)
/* Initialize SW state */
hwpm->bind_completed = false;
hwpm->full_alist_size = 0;
hwpm->alist_map->full_alist_size = 0;
return 0;
}

View File

@@ -11,13 +11,11 @@
* more details.
*/
#include <linux/bitops.h>
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm_static_analysis.h>
#include <tegra_hwpm_mem_mgmt.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_static_analysis.h>
#include <hal/t234/t234_internal.h>
#include <hal/t234/t234_regops_allowlist.h>
@@ -64,7 +62,7 @@ int t234_hwpm_copy_alist(struct tegra_soc_hwpm *hwpm,
}
for (alist_idx = 0ULL; alist_idx < aperture->alist_size; alist_idx++) {
if (f_alist_idx >= hwpm->full_alist_size) {
if (f_alist_idx >= hwpm->alist_map->full_alist_size) {
tegra_hwpm_err(hwpm, "No space in full_alist");
return -ENOMEM;
}

View File

@@ -11,11 +11,6 @@
* more details.
*/
#include <soc/tegra/fuse.h>
#include <linux/of_address.h>
#include <linux/dma-buf.h>
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>

View File

@@ -99,8 +99,7 @@ int t234_hwpm_perfmon_disable(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *perfmon);
int t234_hwpm_disable_mem_mgmt(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream);
int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_invalidate_mem_config(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_stream_mem_bytes(struct tegra_soc_hwpm *hwpm);
int t234_hwpm_disable_pma_streaming(struct tegra_soc_hwpm *hwpm);

View File

@@ -11,9 +11,6 @@
* more details.
*/
#include <soc/tegra/fuse.h>
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm_log.h>

View File

@@ -11,10 +11,7 @@
* more details.
*/
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm_mem_mgmt.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>
@@ -65,14 +62,13 @@ int t234_hwpm_disable_mem_mgmt(struct tegra_soc_hwpm *hwpm)
return 0;
}
int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream)
int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
u32 outbase_lo = 0;
u32 outbase_hi = 0;
u32 outsize = 0;
u32 mem_bytes_addr = 0;
u64 mem_bytes_addr = 0ULL;
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip = active_chip->chip_ips[
active_chip->get_rtr_int_idx(hwpm)];
@@ -81,11 +77,11 @@ int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_aperture *pma_perfmux = &ip_inst_pma->element_info[
TEGRA_HWPM_APERTURE_TYPE_PERFMUX].element_static_array[
T234_HWPM_IP_RTR_PERMUX_INDEX];
struct tegra_hwpm_mem_mgmt *mem_mgmt = hwpm->mem_mgmt;
tegra_hwpm_fn(hwpm, " ");
outbase_lo = alloc_pma_stream->stream_buf_pma_va &
pmasys_channel_outbase_ptr_m();
outbase_lo = mem_mgmt->stream_buf_va & pmasys_channel_outbase_ptr_m();
err = tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_outbase_r(0), outbase_lo);
if (err != 0) {
@@ -94,7 +90,7 @@ int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm,
}
tegra_hwpm_dbg(hwpm, hwpm_verbose, "OUTBASE = 0x%x", outbase_lo);
outbase_hi = (alloc_pma_stream->stream_buf_pma_va >> 32) &
outbase_hi = (mem_mgmt->stream_buf_va >> 32) &
pmasys_channel_outbaseupper_ptr_m();
err = tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_outbaseupper_r(0), outbase_hi);
@@ -104,7 +100,7 @@ int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm,
}
tegra_hwpm_dbg(hwpm, hwpm_verbose, "OUTBASEUPPER = 0x%x", outbase_hi);
outsize = alloc_pma_stream->stream_buf_size &
outsize = mem_mgmt->stream_buf_size &
pmasys_channel_outsize_numbytes_m();
err = tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_outsize_r(0), outsize);
@@ -114,7 +110,7 @@ int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm,
}
tegra_hwpm_dbg(hwpm, hwpm_verbose, "OUTSIZE = 0x%x", outsize);
mem_bytes_addr = sg_dma_address(hwpm->mem_bytes_sgt->sgl) &
mem_bytes_addr = mem_mgmt->mem_bytes_buf_va &
pmasys_channel_mem_bytes_addr_ptr_m();
err = tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_mem_bytes_addr_r(0), mem_bytes_addr);
@@ -125,7 +121,8 @@ int t234_hwpm_enable_mem_mgmt(struct tegra_soc_hwpm *hwpm,
tegra_hwpm_dbg(hwpm, hwpm_verbose,
"MEM_BYTES_ADDR = 0x%x", mem_bytes_addr);
err = tegra_hwpm_writel(hwpm, pma_perfmux, pmasys_channel_mem_block_r(0),
err = tegra_hwpm_writel(hwpm, pma_perfmux,
pmasys_channel_mem_block_r(0),
pmasys_channel_mem_block_valid_f(
pmasys_channel_mem_block_valid_true_v()));
if (err != 0) {
@@ -165,7 +162,8 @@ int t234_hwpm_stream_mem_bytes(struct tegra_soc_hwpm *hwpm)
{
int err = 0;
u32 reg_val = 0U;
u32 *mem_bytes_kernel_u32 = (u32 *)(hwpm->mem_bytes_kernel);
u32 *mem_bytes_kernel_u32 =
(u32 *)(hwpm->mem_mgmt->mem_bytes_kernel);
struct tegra_soc_hwpm_chip *active_chip = hwpm->active_chip;
struct hwpm_ip *chip_ip = active_chip->chip_ips[
active_chip->get_rtr_int_idx(hwpm)];
@@ -177,7 +175,7 @@ int t234_hwpm_stream_mem_bytes(struct tegra_soc_hwpm *hwpm)
tegra_hwpm_fn(hwpm, " ");
*mem_bytes_kernel_u32 = TEGRA_SOC_HWPM_MEM_BYTES_INVALID;
*mem_bytes_kernel_u32 = TEGRA_HWPM_MEM_BYTES_INVALID;
err = tegra_hwpm_readl(hwpm, pma_perfmux,
pmasys_channel_control_user_r(0), &reg_val);

View File

@@ -371,8 +371,7 @@ struct tegra_soc_hwpm_chip {
struct hwpm_ip_aperture *perfmux);
int (*disable_mem_mgmt)(struct tegra_soc_hwpm *hwpm);
int (*enable_mem_mgmt)(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream);
int (*enable_mem_mgmt)(struct tegra_soc_hwpm *hwpm);
int (*invalidate_mem_config)(struct tegra_soc_hwpm *hwpm);
int (*stream_mem_bytes)(struct tegra_soc_hwpm *hwpm);
int (*disable_pma_streaming)(struct tegra_soc_hwpm *hwpm);
@@ -395,9 +394,11 @@ struct tegra_soc_hwpm_chip {
void (*release_sw_setup)(struct tegra_soc_hwpm *hwpm);
};
struct allowlist;
extern struct platform_device *tegra_soc_hwpm_pdev;
extern const struct file_operations tegra_soc_hwpm_ops;
struct allowlist;
struct tegra_hwpm_mem_mgmt;
struct tegra_hwpm_allowlist_map;
/* Driver struct */
struct tegra_soc_hwpm {
@@ -422,18 +423,12 @@ struct tegra_soc_hwpm {
struct reset_control *hwpm_rst;
/* Memory Management */
struct dma_buf *stream_dma_buf;
struct dma_buf_attachment *stream_attach;
struct sg_table *stream_sgt;
struct dma_buf *mem_bytes_dma_buf;
struct dma_buf_attachment *mem_bytes_attach;
struct sg_table *mem_bytes_sgt;
void *mem_bytes_kernel;
struct tegra_hwpm_mem_mgmt *mem_mgmt;
struct tegra_hwpm_allowlist_map *alist_map;
/* SW State */
bool bind_completed;
bool device_opened;
u64 full_alist_size;
atomic_t hwpm_in_use;

View File

@@ -0,0 +1,21 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef TEGRA_HWPM_MEM_MGMT_H
#define TEGRA_HWPM_MEM_MGMT_H
/*
 * OS-abstraction header for HWPM memory management.
 * Common code includes this header; on Linux kernel builds it pulls in
 * the dma-buf based implementation from os/linux/mem_mgmt_utils.h.
 */
#ifdef __KERNEL__
#include <os/linux/mem_mgmt_utils.h>
#endif
#endif /* TEGRA_HWPM_MEM_MGMT_H */

View File

@@ -25,12 +25,12 @@
#include <soc/tegra/fuse.h>
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm_mem_mgmt.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm_io.h>
#include <tegra_hwpm_ip.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_common.h>
#include <os/linux/mem_mgmt_utils.h>
#include <os/linux/ip_utils.h>
#include <os/linux/regops_utils.h>
#define LA_CLK_RATE 625000000UL
@@ -180,7 +180,7 @@ static int tegra_hwpm_query_allowlist_ioctl(struct tegra_soc_hwpm *hwpm,
if (query_allowlist->allowlist == NULL) {
/* Userspace is querying allowlist size only */
if (hwpm->full_alist_size == 0) {
if (hwpm->alist_map->full_alist_size == 0) {
/*Full alist size is not computed yet */
ret = tegra_hwpm_get_allowlist_size(hwpm);
if (ret != 0) {
@@ -189,7 +189,8 @@ static int tegra_hwpm_query_allowlist_ioctl(struct tegra_soc_hwpm *hwpm,
return ret;
}
}
query_allowlist->allowlist_size = hwpm->full_alist_size;
query_allowlist->allowlist_size =
hwpm->alist_map->full_alist_size;
} else {
/* Concatenate allowlists and return */
ret = tegra_hwpm_map_update_allowlist(hwpm, query_allowlist);
@@ -226,7 +227,7 @@ static int tegra_hwpm_update_get_put_ioctl(struct tegra_soc_hwpm *hwpm,
" after the BIND IOCTL.");
return -EPERM;
}
if (!hwpm->mem_bytes_kernel) {
if (!hwpm->mem_mgmt->mem_bytes_kernel) {
tegra_hwpm_err(hwpm,
"mem_bytes buffer is not mapped in the driver");
return -ENXIO;
@@ -516,6 +517,9 @@ static int tegra_hwpm_release(struct inode *inode, struct file *filp)
goto fail;
}
tegra_hwpm_release_alist_map(hwpm);
tegra_hwpm_release_mem_mgmt(hwpm);
ret = tegra_hwpm_release_hw(hwpm);
if (ret < 0) {
tegra_hwpm_err(hwpm, "Failed to release hw");

View File

@@ -22,38 +22,43 @@
#include <soc/tegra/fuse.h>
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm_kmem.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_kmem.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_mem_mgmt.h>
#include <tegra_hwpm_static_analysis.h>
#include <os/linux/mem_mgmt_utils.h>
static int tegra_hwpm_dma_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream)
{
tegra_hwpm_fn(hwpm, " ");
hwpm->stream_dma_buf = dma_buf_get(tegra_hwpm_safe_cast_u64_to_s32(
hwpm->mem_mgmt->stream_buf_size = alloc_pma_stream->stream_buf_size;
hwpm->mem_mgmt->stream_dma_buf =
dma_buf_get(tegra_hwpm_safe_cast_u64_to_s32(
alloc_pma_stream->stream_buf_fd));
if (IS_ERR(hwpm->stream_dma_buf)) {
if (IS_ERR(hwpm->mem_mgmt->stream_dma_buf)) {
tegra_hwpm_err(hwpm, "Unable to get stream dma_buf");
return PTR_ERR(hwpm->stream_dma_buf);
return PTR_ERR(hwpm->mem_mgmt->stream_dma_buf);
}
hwpm->stream_attach = dma_buf_attach(hwpm->stream_dma_buf, hwpm->dev);
if (IS_ERR(hwpm->stream_attach)) {
hwpm->mem_mgmt->stream_attach =
dma_buf_attach(hwpm->mem_mgmt->stream_dma_buf, hwpm->dev);
if (IS_ERR(hwpm->mem_mgmt->stream_attach)) {
tegra_hwpm_err(hwpm, "Unable to attach stream dma_buf");
return PTR_ERR(hwpm->stream_attach);
return PTR_ERR(hwpm->mem_mgmt->stream_attach);
}
hwpm->stream_sgt = dma_buf_map_attachment(hwpm->stream_attach,
DMA_FROM_DEVICE);
if (IS_ERR(hwpm->stream_sgt)) {
hwpm->mem_mgmt->stream_sgt = dma_buf_map_attachment(
hwpm->mem_mgmt->stream_attach, DMA_FROM_DEVICE);
if (IS_ERR(hwpm->mem_mgmt->stream_sgt)) {
tegra_hwpm_err(hwpm, "Unable to map stream attachment");
return PTR_ERR(hwpm->stream_sgt);
return PTR_ERR(hwpm->mem_mgmt->stream_sgt);
}
alloc_pma_stream->stream_buf_pma_va =
sg_dma_address(hwpm->stream_sgt->sgl);
hwpm->mem_mgmt->stream_buf_va =
sg_dma_address(hwpm->mem_mgmt->stream_sgt->sgl);
alloc_pma_stream->stream_buf_pma_va = hwpm->mem_mgmt->stream_buf_va;
if (alloc_pma_stream->stream_buf_pma_va == 0) {
tegra_hwpm_err(hwpm, "Invalid stream buffer SMMU IOVA");
return -ENXIO;
@@ -70,34 +75,39 @@ static int tegra_hwpm_dma_map_mem_bytes_buffer(struct tegra_soc_hwpm *hwpm,
{
tegra_hwpm_fn(hwpm, " ");
hwpm->mem_bytes_dma_buf = dma_buf_get(tegra_hwpm_safe_cast_u64_to_s32(
hwpm->mem_mgmt->mem_bytes_dma_buf =
dma_buf_get(tegra_hwpm_safe_cast_u64_to_s32(
alloc_pma_stream->mem_bytes_buf_fd));
if (IS_ERR(hwpm->mem_bytes_dma_buf)) {
if (IS_ERR(hwpm->mem_mgmt->mem_bytes_dma_buf)) {
tegra_hwpm_err(hwpm, "Unable to get mem bytes dma_buf");
return PTR_ERR(hwpm->mem_bytes_dma_buf);
return PTR_ERR(hwpm->mem_mgmt->mem_bytes_dma_buf);
}
hwpm->mem_bytes_attach = dma_buf_attach(hwpm->mem_bytes_dma_buf,
hwpm->dev);
if (IS_ERR(hwpm->mem_bytes_attach)) {
hwpm->mem_mgmt->mem_bytes_attach = dma_buf_attach(
hwpm->mem_mgmt->mem_bytes_dma_buf, hwpm->dev);
if (IS_ERR(hwpm->mem_mgmt->mem_bytes_attach)) {
tegra_hwpm_err(hwpm, "Unable to attach mem bytes dma_buf");
return PTR_ERR(hwpm->mem_bytes_attach);
return PTR_ERR(hwpm->mem_mgmt->mem_bytes_attach);
}
hwpm->mem_bytes_sgt = dma_buf_map_attachment(hwpm->mem_bytes_attach,
DMA_FROM_DEVICE);
if (IS_ERR(hwpm->mem_bytes_sgt)) {
hwpm->mem_mgmt->mem_bytes_sgt = dma_buf_map_attachment(
hwpm->mem_mgmt->mem_bytes_attach, DMA_FROM_DEVICE);
if (IS_ERR(hwpm->mem_mgmt->mem_bytes_sgt)) {
tegra_hwpm_err(hwpm, "Unable to map mem bytes attachment");
return PTR_ERR(hwpm->mem_bytes_sgt);
return PTR_ERR(hwpm->mem_mgmt->mem_bytes_sgt);
}
hwpm->mem_bytes_kernel = dma_buf_vmap(hwpm->mem_bytes_dma_buf);
if (!hwpm->mem_bytes_kernel) {
hwpm->mem_mgmt->mem_bytes_buf_va =
sg_dma_address(hwpm->mem_mgmt->mem_bytes_sgt->sgl);
hwpm->mem_mgmt->mem_bytes_kernel =
dma_buf_vmap(hwpm->mem_mgmt->mem_bytes_dma_buf);
if (!hwpm->mem_mgmt->mem_bytes_kernel) {
tegra_hwpm_err(hwpm,
"Unable to map mem_bytes buffer into kernel VA space");
return -ENOMEM;
}
memset(hwpm->mem_bytes_kernel, 0, 32);
memset(hwpm->mem_mgmt->mem_bytes_kernel, 0, 32);
return 0;
}
@@ -106,45 +116,54 @@ static int tegra_hwpm_reset_stream_buf(struct tegra_soc_hwpm *hwpm)
{
tegra_hwpm_fn(hwpm, " ");
if (hwpm->stream_sgt && (!IS_ERR(hwpm->stream_sgt))) {
dma_buf_unmap_attachment(hwpm->stream_attach,
hwpm->stream_sgt,
DMA_FROM_DEVICE);
if (hwpm->mem_mgmt->stream_sgt &&
(!IS_ERR(hwpm->mem_mgmt->stream_sgt))) {
dma_buf_unmap_attachment(hwpm->mem_mgmt->stream_attach,
hwpm->mem_mgmt->stream_sgt, DMA_FROM_DEVICE);
}
hwpm->stream_sgt = NULL;
hwpm->mem_mgmt->stream_sgt = NULL;
if (hwpm->stream_attach && (!IS_ERR(hwpm->stream_attach))) {
dma_buf_detach(hwpm->stream_dma_buf, hwpm->stream_attach);
if (hwpm->mem_mgmt->stream_attach &&
(!IS_ERR(hwpm->mem_mgmt->stream_attach))) {
dma_buf_detach(hwpm->mem_mgmt->stream_dma_buf,
hwpm->mem_mgmt->stream_attach);
}
hwpm->stream_attach = NULL;
hwpm->mem_mgmt->stream_attach = NULL;
hwpm->mem_mgmt->stream_buf_size = 0ULL;
hwpm->mem_mgmt->stream_buf_va = 0ULL;
if (hwpm->stream_dma_buf && (!IS_ERR(hwpm->stream_dma_buf))) {
dma_buf_put(hwpm->stream_dma_buf);
if (hwpm->mem_mgmt->stream_dma_buf &&
(!IS_ERR(hwpm->mem_mgmt->stream_dma_buf))) {
dma_buf_put(hwpm->mem_mgmt->stream_dma_buf);
}
hwpm->stream_dma_buf = NULL;
hwpm->mem_mgmt->stream_dma_buf = NULL;
if (hwpm->mem_bytes_kernel) {
dma_buf_vunmap(hwpm->mem_bytes_dma_buf,
hwpm->mem_bytes_kernel);
hwpm->mem_bytes_kernel = NULL;
if (hwpm->mem_mgmt->mem_bytes_kernel) {
dma_buf_vunmap(hwpm->mem_mgmt->mem_bytes_dma_buf,
hwpm->mem_mgmt->mem_bytes_kernel);
hwpm->mem_mgmt->mem_bytes_kernel = NULL;
}
if (hwpm->mem_bytes_sgt && (!IS_ERR(hwpm->mem_bytes_sgt))) {
dma_buf_unmap_attachment(hwpm->mem_bytes_attach,
hwpm->mem_bytes_sgt,
DMA_FROM_DEVICE);
if (hwpm->mem_mgmt->mem_bytes_sgt &&
(!IS_ERR(hwpm->mem_mgmt->mem_bytes_sgt))) {
dma_buf_unmap_attachment(hwpm->mem_mgmt->mem_bytes_attach,
hwpm->mem_mgmt->mem_bytes_sgt, DMA_FROM_DEVICE);
}
hwpm->mem_bytes_sgt = NULL;
hwpm->mem_mgmt->mem_bytes_sgt = NULL;
hwpm->mem_mgmt->mem_bytes_buf_va = 0ULL;
if (hwpm->mem_bytes_attach && (!IS_ERR(hwpm->mem_bytes_attach))) {
dma_buf_detach(hwpm->mem_bytes_dma_buf, hwpm->mem_bytes_attach);
if (hwpm->mem_mgmt->mem_bytes_attach &&
(!IS_ERR(hwpm->mem_mgmt->mem_bytes_attach))) {
dma_buf_detach(hwpm->mem_mgmt->mem_bytes_dma_buf,
hwpm->mem_mgmt->mem_bytes_attach);
}
hwpm->mem_bytes_attach = NULL;
hwpm->mem_mgmt->mem_bytes_attach = NULL;
if (hwpm->mem_bytes_dma_buf && (!IS_ERR(hwpm->mem_bytes_dma_buf))) {
dma_buf_put(hwpm->mem_bytes_dma_buf);
if (hwpm->mem_mgmt->mem_bytes_dma_buf &&
(!IS_ERR(hwpm->mem_mgmt->mem_bytes_dma_buf))) {
dma_buf_put(hwpm->mem_mgmt->mem_bytes_dma_buf);
}
hwpm->mem_bytes_dma_buf = NULL;
hwpm->mem_mgmt->mem_bytes_dma_buf = NULL;
return 0;
}
@@ -156,6 +175,17 @@ int tegra_hwpm_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
tegra_hwpm_fn(hwpm, " ");
if (hwpm->mem_mgmt == NULL) {
/* Allocate tegra_hwpm_mem_mgmt */
hwpm->mem_mgmt = tegra_hwpm_kzalloc(hwpm,
sizeof(struct tegra_hwpm_mem_mgmt));
if (!hwpm->mem_mgmt) {
tegra_hwpm_err(NULL,
"Couldn't allocate memory for mem_mgmt struct");
return -ENOMEM;
}
}
/* Memory map stream buffer */
ret = tegra_hwpm_dma_map_stream_buffer(hwpm, alloc_pma_stream);
if (ret != 0) {
@@ -171,7 +201,7 @@ int tegra_hwpm_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
}
/* Configure memory management */
ret = hwpm->active_chip->enable_mem_mgmt(hwpm, alloc_pma_stream);
ret = hwpm->active_chip->enable_mem_mgmt(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed to configure stream memory");
goto fail;
@@ -200,6 +230,8 @@ fail:
tegra_hwpm_err(hwpm, "Failed to reset stream buffer");
}
tegra_hwpm_release_mem_mgmt(hwpm);
return ret;
}
@@ -210,10 +242,11 @@ int tegra_hwpm_clear_mem_pipeline(struct tegra_soc_hwpm *hwpm)
tegra_hwpm_fn(hwpm, " ");
/* Stream MEM_BYTES to clear pipeline */
if (hwpm->mem_bytes_kernel) {
if (hwpm->mem_mgmt->mem_bytes_kernel) {
s32 timeout_msecs = 1000;
u32 sleep_msecs = 100;
u32 *mem_bytes_kernel_u32 = (u32 *)(hwpm->mem_bytes_kernel);
u32 *mem_bytes_kernel_u32 =
(u32 *)(hwpm->mem_mgmt->mem_bytes_kernel);
do {
ret = hwpm->active_chip->stream_mem_bytes(hwpm);
@@ -265,6 +298,12 @@ int tegra_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm,
tegra_hwpm_fn(hwpm, " ");
if (!hwpm->mem_mgmt->mem_bytes_kernel) {
tegra_hwpm_err(hwpm,
"mem_bytes buffer is not mapped in the driver");
return -ENXIO;
}
/* Update SW get pointer */
ret = hwpm->active_chip->update_mem_bytes_get_ptr(hwpm,
update_get_put->mem_bump);
@@ -306,12 +345,8 @@ int tegra_hwpm_map_update_allowlist(struct tegra_soc_hwpm *hwpm,
{
int err = 0;
u64 pinned_pages = 0;
u64 page_idx = 0;
u64 alist_buf_size = 0;
u64 num_pages = 0;
u64 *full_alist_u64 = NULL;
void *full_alist = NULL;
struct page **pages = NULL;
struct tegra_soc_hwpm_query_allowlist *query_allowlist =
(struct tegra_soc_hwpm_query_allowlist *)ioctl_struct;
unsigned long user_va = (unsigned long)(query_allowlist->allowlist);
@@ -319,13 +354,25 @@ int tegra_hwpm_map_update_allowlist(struct tegra_soc_hwpm *hwpm,
tegra_hwpm_fn(hwpm, " ");
if (hwpm->full_alist_size == 0ULL) {
if (hwpm->alist_map->full_alist_size == 0ULL) {
tegra_hwpm_err(hwpm, "Invalid allowlist size");
return -EINVAL;
}
alist_buf_size = tegra_hwpm_safe_mult_u64(hwpm->full_alist_size,
hwpm->active_chip->get_alist_buf_size(hwpm));
if (hwpm->alist_map == NULL) {
/* Allocate tegra_hwpm_allowlist_map */
hwpm->alist_map = tegra_hwpm_kzalloc(hwpm,
sizeof(struct tegra_hwpm_allowlist_map));
if (!hwpm->alist_map) {
tegra_hwpm_err(NULL,
"Couldn't allocate allowlist map structure");
return -ENOMEM;
}
}
alist_buf_size =
tegra_hwpm_safe_mult_u64(hwpm->alist_map->full_alist_size,
hwpm->active_chip->get_alist_buf_size(hwpm));
tegra_hwpm_dbg(hwpm, hwpm_info | hwpm_dbg_allowlist,
"alist_buf_size 0x%llx", alist_buf_size);
@@ -336,54 +383,72 @@ int tegra_hwpm_map_update_allowlist(struct tegra_soc_hwpm *hwpm,
/* Round-up and Divide */
alist_buf_size = tegra_hwpm_safe_sub_u64(
tegra_hwpm_safe_add_u64(alist_buf_size, PAGE_SIZE), 1ULL);
num_pages = alist_buf_size / PAGE_SIZE;
hwpm->alist_map->num_pages = alist_buf_size / PAGE_SIZE;
pages = tegra_hwpm_kcalloc(hwpm, num_pages, sizeof(*pages));
if (!pages) {
hwpm->alist_map->pages = (struct page **)tegra_hwpm_kcalloc(
hwpm, hwpm->alist_map->num_pages, sizeof(struct page *));
if (!hwpm->alist_map->pages) {
tegra_hwpm_err(hwpm,
"Couldn't allocate memory for pages array");
err = -ENOMEM;
goto alist_unmap;
goto fail;
}
pinned_pages = get_user_pages(user_va & PAGE_MASK, num_pages, 0,
pages, NULL);
if (pinned_pages != num_pages) {
pinned_pages = get_user_pages(user_va & PAGE_MASK,
hwpm->alist_map->num_pages, 0, hwpm->alist_map->pages, NULL);
if (pinned_pages != hwpm->alist_map->num_pages) {
tegra_hwpm_err(hwpm, "Requested %llu pages / Got %ld pages",
num_pages, pinned_pages);
hwpm->alist_map->num_pages, pinned_pages);
err = -ENOMEM;
goto alist_unmap;
goto fail;
}
full_alist = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
if (!full_alist) {
tegra_hwpm_err(hwpm, "Couldn't map allowlist buffer into"
" kernel address space");
hwpm->alist_map->full_alist_map = vmap(hwpm->alist_map->pages,
hwpm->alist_map->num_pages, VM_MAP, PAGE_KERNEL);
if (!hwpm->alist_map->full_alist_map) {
tegra_hwpm_err(hwpm,
"Couldn't map allowlist buffer in kernel addr space");
err = -ENOMEM;
goto alist_unmap;
goto fail;
}
full_alist_u64 = (u64 *)(full_alist + offset);
full_alist_u64 = (u64 *)(hwpm->alist_map->full_alist_map + offset);
err = tegra_hwpm_combine_alist(hwpm, full_alist_u64);
if (err != 0) {
goto alist_unmap;
goto fail;
}
query_allowlist->allowlist_size = hwpm->full_alist_size;
query_allowlist->allowlist_size = hwpm->alist_map->full_alist_size;
return 0;
alist_unmap:
if (full_alist)
vunmap(full_alist);
if (pinned_pages > 0) {
for (page_idx = 0ULL; page_idx < pinned_pages; page_idx++) {
set_page_dirty(pages[page_idx]);
put_page(pages[page_idx]);
}
}
if (pages) {
tegra_hwpm_kfree(hwpm, pages);
}
fail:
tegra_hwpm_release_alist_map(hwpm);
return err;
}
void tegra_hwpm_release_alist_map(struct tegra_soc_hwpm *hwpm)
{
u64 idx = 0U;
if (hwpm->alist_map->full_alist_map) {
vunmap(hwpm->alist_map->full_alist_map);
}
for (idx = 0ULL; idx < hwpm->alist_map->num_pages; idx++) {
set_page_dirty(hwpm->alist_map->pages[idx]);
put_page(hwpm->alist_map->pages[idx]);
}
if (hwpm->alist_map->pages) {
tegra_hwpm_kfree(hwpm, hwpm->alist_map->pages);
}
if (hwpm->alist_map) {
tegra_hwpm_kfree(hwpm, hwpm->alist_map);
}
}
void tegra_hwpm_release_mem_mgmt(struct tegra_soc_hwpm *hwpm)
{
tegra_hwpm_kfree(hwpm, hwpm->mem_mgmt);
}

View File

@@ -14,9 +14,38 @@
#ifndef TEGRA_HWPM_OS_LINUX_MEM_MGMT_UTILS_H
#define TEGRA_HWPM_OS_LINUX_MEM_MGMT_UTILS_H
#include <linux/types.h>
/* This macro is copy of TEGRA_SOC_HWPM_MEM_BYTES_INVALID */
#define TEGRA_HWPM_MEM_BYTES_INVALID 0xffffffff
struct tegra_soc_hwpm;
struct tegra_soc_hwpm_alloc_pma_stream;
struct tegra_soc_hwpm_update_get_put;
struct sg_table;
struct dma_buf;
struct dma_buf_attachment;
struct tegra_soc_hwpm_update_get_put;
/*
 * Linux-specific state for the user-allocated stream and mem-bytes
 * dma buffers mapped by the driver (see os/linux/mem_mgmt_utils.c).
 * Allocated on first ALLOC_PMA_STREAM and freed by
 * tegra_hwpm_release_mem_mgmt().
 */
struct tegra_hwpm_mem_mgmt {
/* Scatter-gather table of the mapped stream buffer attachment */
struct sg_table *stream_sgt;
/* Scatter-gather table of the mapped mem-bytes buffer attachment */
struct sg_table *mem_bytes_sgt;
/* Stream buffer dma_buf obtained from the user-supplied fd */
struct dma_buf *stream_dma_buf;
/* Device attachment for the stream buffer */
struct dma_buf_attachment *stream_attach;
/* Stream buffer size in bytes, copied from the alloc ioctl */
u64 stream_buf_size;
/* Stream buffer device (IOVA) address from sg_dma_address() */
u64 stream_buf_va;
/* Mem-bytes buffer dma_buf obtained from the user-supplied fd */
struct dma_buf *mem_bytes_dma_buf;
/* Device attachment for the mem-bytes buffer */
struct dma_buf_attachment *mem_bytes_attach;
/* Mem-bytes buffer device (IOVA) address from sg_dma_address() */
u64 mem_bytes_buf_va;
/* Kernel VA of the mem-bytes buffer (dma_buf_vmap) */
void *mem_bytes_kernel;
};
/*
 * State for the combined register allowlist mapped from user memory
 * (get_user_pages + vmap). Allocated on demand in
 * tegra_hwpm_map_update_allowlist() and freed by
 * tegra_hwpm_release_alist_map().
 */
struct tegra_hwpm_allowlist_map {
/* Total number of allowlist entries across all enabled IP elements */
u64 full_alist_size;
/* Number of pinned user pages backing the allowlist buffer */
u64 num_pages;
/* Array of pinned user pages (length num_pages) */
struct page **pages;
/* Kernel VA mapping (vmap) of the pinned user pages */
void *full_alist_map;
};
int tegra_hwpm_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream);
@@ -25,5 +54,7 @@ int tegra_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_update_get_put *update_get_put);
int tegra_hwpm_map_update_allowlist(struct tegra_soc_hwpm *hwpm,
void *ioctl_struct);
void tegra_hwpm_release_alist_map(struct tegra_soc_hwpm *hwpm);
void tegra_hwpm_release_mem_mgmt(struct tegra_soc_hwpm *hwpm);
#endif /* TEGRA_HWPM_OS_LINUX_MEM_MGMT_UTILS_H */