Files
linux-hwpm/os/linux/mem_mgmt_utils.c
Vedashree Vidwans 486ec4a24c tegra: hwpm: create memory buffer structures
Stream and allowlist buffers are allocated by the user as dma buffers
and mapped in virtual address space by the driver. The DMA and mapping
functions are linux specific. Hence create memory management and
allowlist linux structures. Add these linux memory structure pointers in
the tegra_hwpm parent structure.

Jira THWPM-60

Change-Id: I2526f2bab835df4c5a922b0b375c22a6247aad30
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2729664
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
2022-08-20 23:29:24 -07:00

455 lines
12 KiB
C

/*
* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/of_address.h>
#include <linux/dma-buf.h>
#include <soc/tegra/fuse.h>
#include <uapi/linux/tegra-soc-hwpm-uapi.h>
#include <tegra_hwpm_kmem.h>
#include <tegra_hwpm_log.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_kmem.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_mem_mgmt.h>
#include <tegra_hwpm_static_analysis.h>
static int tegra_hwpm_dma_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream)
{
tegra_hwpm_fn(hwpm, " ");
hwpm->mem_mgmt->stream_buf_size = alloc_pma_stream->stream_buf_size;
hwpm->mem_mgmt->stream_dma_buf =
dma_buf_get(tegra_hwpm_safe_cast_u64_to_s32(
alloc_pma_stream->stream_buf_fd));
if (IS_ERR(hwpm->mem_mgmt->stream_dma_buf)) {
tegra_hwpm_err(hwpm, "Unable to get stream dma_buf");
return PTR_ERR(hwpm->mem_mgmt->stream_dma_buf);
}
hwpm->mem_mgmt->stream_attach =
dma_buf_attach(hwpm->mem_mgmt->stream_dma_buf, hwpm->dev);
if (IS_ERR(hwpm->mem_mgmt->stream_attach)) {
tegra_hwpm_err(hwpm, "Unable to attach stream dma_buf");
return PTR_ERR(hwpm->mem_mgmt->stream_attach);
}
hwpm->mem_mgmt->stream_sgt = dma_buf_map_attachment(
hwpm->mem_mgmt->stream_attach, DMA_FROM_DEVICE);
if (IS_ERR(hwpm->mem_mgmt->stream_sgt)) {
tegra_hwpm_err(hwpm, "Unable to map stream attachment");
return PTR_ERR(hwpm->mem_mgmt->stream_sgt);
}
hwpm->mem_mgmt->stream_buf_va =
sg_dma_address(hwpm->mem_mgmt->stream_sgt->sgl);
alloc_pma_stream->stream_buf_pma_va = hwpm->mem_mgmt->stream_buf_va;
if (alloc_pma_stream->stream_buf_pma_va == 0) {
tegra_hwpm_err(hwpm, "Invalid stream buffer SMMU IOVA");
return -ENXIO;
}
tegra_hwpm_dbg(hwpm, hwpm_dbg_alloc_pma_stream,
"stream_buf_pma_va = 0x%llx",
alloc_pma_stream->stream_buf_pma_va);
return 0;
}
static int tegra_hwpm_dma_map_mem_bytes_buffer(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream)
{
tegra_hwpm_fn(hwpm, " ");
hwpm->mem_mgmt->mem_bytes_dma_buf =
dma_buf_get(tegra_hwpm_safe_cast_u64_to_s32(
alloc_pma_stream->mem_bytes_buf_fd));
if (IS_ERR(hwpm->mem_mgmt->mem_bytes_dma_buf)) {
tegra_hwpm_err(hwpm, "Unable to get mem bytes dma_buf");
return PTR_ERR(hwpm->mem_mgmt->mem_bytes_dma_buf);
}
hwpm->mem_mgmt->mem_bytes_attach = dma_buf_attach(
hwpm->mem_mgmt->mem_bytes_dma_buf, hwpm->dev);
if (IS_ERR(hwpm->mem_mgmt->mem_bytes_attach)) {
tegra_hwpm_err(hwpm, "Unable to attach mem bytes dma_buf");
return PTR_ERR(hwpm->mem_mgmt->mem_bytes_attach);
}
hwpm->mem_mgmt->mem_bytes_sgt = dma_buf_map_attachment(
hwpm->mem_mgmt->mem_bytes_attach, DMA_FROM_DEVICE);
if (IS_ERR(hwpm->mem_mgmt->mem_bytes_sgt)) {
tegra_hwpm_err(hwpm, "Unable to map mem bytes attachment");
return PTR_ERR(hwpm->mem_mgmt->mem_bytes_sgt);
}
hwpm->mem_mgmt->mem_bytes_buf_va =
sg_dma_address(hwpm->mem_mgmt->mem_bytes_sgt->sgl);
hwpm->mem_mgmt->mem_bytes_kernel =
dma_buf_vmap(hwpm->mem_mgmt->mem_bytes_dma_buf);
if (!hwpm->mem_mgmt->mem_bytes_kernel) {
tegra_hwpm_err(hwpm,
"Unable to map mem_bytes buffer into kernel VA space");
return -ENOMEM;
}
memset(hwpm->mem_mgmt->mem_bytes_kernel, 0, 32);
return 0;
}
static int tegra_hwpm_reset_stream_buf(struct tegra_soc_hwpm *hwpm)
{
tegra_hwpm_fn(hwpm, " ");
if (hwpm->mem_mgmt->stream_sgt &&
(!IS_ERR(hwpm->mem_mgmt->stream_sgt))) {
dma_buf_unmap_attachment(hwpm->mem_mgmt->stream_attach,
hwpm->mem_mgmt->stream_sgt, DMA_FROM_DEVICE);
}
hwpm->mem_mgmt->stream_sgt = NULL;
if (hwpm->mem_mgmt->stream_attach &&
(!IS_ERR(hwpm->mem_mgmt->stream_attach))) {
dma_buf_detach(hwpm->mem_mgmt->stream_dma_buf,
hwpm->mem_mgmt->stream_attach);
}
hwpm->mem_mgmt->stream_attach = NULL;
hwpm->mem_mgmt->stream_buf_size = 0ULL;
hwpm->mem_mgmt->stream_buf_va = 0ULL;
if (hwpm->mem_mgmt->stream_dma_buf &&
(!IS_ERR(hwpm->mem_mgmt->stream_dma_buf))) {
dma_buf_put(hwpm->mem_mgmt->stream_dma_buf);
}
hwpm->mem_mgmt->stream_dma_buf = NULL;
if (hwpm->mem_mgmt->mem_bytes_kernel) {
dma_buf_vunmap(hwpm->mem_mgmt->mem_bytes_dma_buf,
hwpm->mem_mgmt->mem_bytes_kernel);
hwpm->mem_mgmt->mem_bytes_kernel = NULL;
}
if (hwpm->mem_mgmt->mem_bytes_sgt &&
(!IS_ERR(hwpm->mem_mgmt->mem_bytes_sgt))) {
dma_buf_unmap_attachment(hwpm->mem_mgmt->mem_bytes_attach,
hwpm->mem_mgmt->mem_bytes_sgt, DMA_FROM_DEVICE);
}
hwpm->mem_mgmt->mem_bytes_sgt = NULL;
hwpm->mem_mgmt->mem_bytes_buf_va = 0ULL;
if (hwpm->mem_mgmt->mem_bytes_attach &&
(!IS_ERR(hwpm->mem_mgmt->mem_bytes_attach))) {
dma_buf_detach(hwpm->mem_mgmt->mem_bytes_dma_buf,
hwpm->mem_mgmt->mem_bytes_attach);
}
hwpm->mem_mgmt->mem_bytes_attach = NULL;
if (hwpm->mem_mgmt->mem_bytes_dma_buf &&
(!IS_ERR(hwpm->mem_mgmt->mem_bytes_dma_buf))) {
dma_buf_put(hwpm->mem_mgmt->mem_bytes_dma_buf);
}
hwpm->mem_mgmt->mem_bytes_dma_buf = NULL;
return 0;
}
/*
 * IOCTL backend: allocate the mem_mgmt tracking structure (first call
 * only), map the user's stream and mem_bytes dma_bufs, and enable the
 * chip's stream memory management.
 *
 * On any failure the chip config is invalidated/disabled, all buffer
 * mappings are released, and the mem_mgmt structure is freed so a
 * subsequent call starts from a clean state.
 *
 * Returns 0 on success, negative errno on failure.
 */
int tegra_hwpm_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
	struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream)
{
	int ret = 0, err = 0;

	tegra_hwpm_fn(hwpm, " ");

	if (hwpm->mem_mgmt == NULL) {
		/* Allocate tegra_hwpm_mem_mgmt */
		hwpm->mem_mgmt = tegra_hwpm_kzalloc(hwpm,
			sizeof(struct tegra_hwpm_mem_mgmt));
		if (!hwpm->mem_mgmt) {
			/* Fix: pass hwpm (not NULL) so the log is attributed */
			tegra_hwpm_err(hwpm,
				"Couldn't allocate memory for mem_mgmt struct");
			return -ENOMEM;
		}
	}

	/* Memory map stream buffer */
	ret = tegra_hwpm_dma_map_stream_buffer(hwpm, alloc_pma_stream);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "Failed to map stream buffer");
		goto fail;
	}

	/* Memory map mem bytes buffer */
	ret = tegra_hwpm_dma_map_mem_bytes_buffer(hwpm, alloc_pma_stream);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "Failed to map mem bytes buffer");
		goto fail;
	}

	/* Configure memory management */
	ret = hwpm->active_chip->enable_mem_mgmt(hwpm);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "Failed to configure stream memory");
		goto fail;
	}

	return 0;

fail:
	/* Invalidate memory config */
	err = hwpm->active_chip->invalidate_mem_config(hwpm);
	if (err != 0) {
		tegra_hwpm_err(hwpm, "Failed to invalidate memory config");
	}

	/* Disable memory management */
	err = hwpm->active_chip->disable_mem_mgmt(hwpm);
	if (err != 0) {
		tegra_hwpm_err(hwpm, "Failed to disable memory management");
	}

	alloc_pma_stream->stream_buf_pma_va = 0;

	/* Reset stream buffer */
	err = tegra_hwpm_reset_stream_buf(hwpm);
	if (err != 0) {
		tegra_hwpm_err(hwpm, "Failed to reset stream buffer");
	}

	tegra_hwpm_release_mem_mgmt(hwpm);
	/*
	 * Fix: the release helper frees the structure; clear the pointer so
	 * a retry does not dereference freed memory (use-after-free).
	 */
	hwpm->mem_mgmt = NULL;

	return ret;
}
int tegra_hwpm_clear_mem_pipeline(struct tegra_soc_hwpm *hwpm)
{
int ret = 0;
tegra_hwpm_fn(hwpm, " ");
/* Stream MEM_BYTES to clear pipeline */
if (hwpm->mem_mgmt->mem_bytes_kernel) {
s32 timeout_msecs = 1000;
u32 sleep_msecs = 100;
u32 *mem_bytes_kernel_u32 =
(u32 *)(hwpm->mem_mgmt->mem_bytes_kernel);
do {
ret = hwpm->active_chip->stream_mem_bytes(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"Trigger mem_bytes streaming failed");
goto fail;
}
msleep(sleep_msecs);
timeout_msecs -= sleep_msecs;
} while ((*mem_bytes_kernel_u32 ==
TEGRA_SOC_HWPM_MEM_BYTES_INVALID) &&
(timeout_msecs > 0));
if (timeout_msecs <= 0) {
tegra_hwpm_err(hwpm,
"Timeout expired for MEM_BYTES streaming");
return -ETIMEDOUT;
}
}
ret = hwpm->active_chip->disable_pma_streaming(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed to disable pma streaming");
goto fail;
}
/* Disable memory management */
ret = hwpm->active_chip->disable_mem_mgmt(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed to disable memory management");
goto fail;
}
/* Reset stream buffer */
ret = tegra_hwpm_reset_stream_buf(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed to reset stream buffer");
goto fail;
}
fail:
return ret;
}
int tegra_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_update_get_put *update_get_put)
{
int ret;
tegra_hwpm_fn(hwpm, " ");
if (!hwpm->mem_mgmt->mem_bytes_kernel) {
tegra_hwpm_err(hwpm,
"mem_bytes buffer is not mapped in the driver");
return -ENXIO;
}
/* Update SW get pointer */
ret = hwpm->active_chip->update_mem_bytes_get_ptr(hwpm,
update_get_put->mem_bump);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed to update mem_bytes get ptr");
return -EINVAL;
}
/* Stream MEM_BYTES value to MEM_BYTES buffer */
if (update_get_put->b_stream_mem_bytes) {
ret = hwpm->active_chip->stream_mem_bytes(hwpm);
if (ret != 0) {
tegra_hwpm_err(hwpm,
"Failed to trigger mem_bytes streaming");
}
}
/* Read HW put pointer */
if (update_get_put->b_read_mem_head) {
update_get_put->mem_head =
hwpm->active_chip->get_mem_bytes_put_ptr(hwpm);
tegra_hwpm_dbg(hwpm, hwpm_dbg_update_get_put,
"MEM_HEAD = 0x%llx", update_get_put->mem_head);
}
/* Check overflow error status */
if (update_get_put->b_check_overflow) {
update_get_put->b_overflowed =
(u8) hwpm->active_chip->membuf_overflow_status(hwpm);
tegra_hwpm_dbg(hwpm, hwpm_dbg_update_get_put, "OVERFLOWED = %u",
update_get_put->b_overflowed);
}
return 0;
}
int tegra_hwpm_map_update_allowlist(struct tegra_soc_hwpm *hwpm,
void *ioctl_struct)
{
int err = 0;
u64 pinned_pages = 0;
u64 alist_buf_size = 0;
u64 *full_alist_u64 = NULL;
struct tegra_soc_hwpm_query_allowlist *query_allowlist =
(struct tegra_soc_hwpm_query_allowlist *)ioctl_struct;
unsigned long user_va = (unsigned long)(query_allowlist->allowlist);
unsigned long offset = user_va & ~PAGE_MASK;
tegra_hwpm_fn(hwpm, " ");
if (hwpm->alist_map->full_alist_size == 0ULL) {
tegra_hwpm_err(hwpm, "Invalid allowlist size");
return -EINVAL;
}
if (hwpm->alist_map == NULL) {
/* Allocate tegra_hwpm_allowlist_map */
hwpm->alist_map = tegra_hwpm_kzalloc(hwpm,
sizeof(struct tegra_hwpm_allowlist_map));
if (!hwpm->alist_map) {
tegra_hwpm_err(NULL,
"Couldn't allocate allowlist map structure");
return -ENOMEM;
}
}
alist_buf_size =
tegra_hwpm_safe_mult_u64(hwpm->alist_map->full_alist_size,
hwpm->active_chip->get_alist_buf_size(hwpm));
tegra_hwpm_dbg(hwpm, hwpm_info | hwpm_dbg_allowlist,
"alist_buf_size 0x%llx", alist_buf_size);
/* Memory map user buffer into kernel address space */
alist_buf_size = tegra_hwpm_safe_add_u64(offset, alist_buf_size);
/* Round-up and Divide */
alist_buf_size = tegra_hwpm_safe_sub_u64(
tegra_hwpm_safe_add_u64(alist_buf_size, PAGE_SIZE), 1ULL);
hwpm->alist_map->num_pages = alist_buf_size / PAGE_SIZE;
hwpm->alist_map->pages = (struct page **)tegra_hwpm_kcalloc(
hwpm, hwpm->alist_map->num_pages, sizeof(struct page *));
if (!hwpm->alist_map->pages) {
tegra_hwpm_err(hwpm,
"Couldn't allocate memory for pages array");
err = -ENOMEM;
goto fail;
}
pinned_pages = get_user_pages(user_va & PAGE_MASK,
hwpm->alist_map->num_pages, 0, hwpm->alist_map->pages, NULL);
if (pinned_pages != hwpm->alist_map->num_pages) {
tegra_hwpm_err(hwpm, "Requested %llu pages / Got %ld pages",
hwpm->alist_map->num_pages, pinned_pages);
err = -ENOMEM;
goto fail;
}
hwpm->alist_map->full_alist_map = vmap(hwpm->alist_map->pages,
hwpm->alist_map->num_pages, VM_MAP, PAGE_KERNEL);
if (!hwpm->alist_map->full_alist_map) {
tegra_hwpm_err(hwpm,
"Couldn't map allowlist buffer in kernel addr space");
err = -ENOMEM;
goto fail;
}
full_alist_u64 = (u64 *)(hwpm->alist_map->full_alist_map + offset);
err = tegra_hwpm_combine_alist(hwpm, full_alist_u64);
if (err != 0) {
goto fail;
}
query_allowlist->allowlist_size = hwpm->alist_map->full_alist_size;
return 0;
fail:
tegra_hwpm_release_alist_map(hwpm);
return err;
}
void tegra_hwpm_release_alist_map(struct tegra_soc_hwpm *hwpm)
{
u64 idx = 0U;
if (hwpm->alist_map->full_alist_map) {
vunmap(hwpm->alist_map->full_alist_map);
}
for (idx = 0ULL; idx < hwpm->alist_map->num_pages; idx++) {
set_page_dirty(hwpm->alist_map->pages[idx]);
put_page(hwpm->alist_map->pages[idx]);
}
if (hwpm->alist_map->pages) {
tegra_hwpm_kfree(hwpm, hwpm->alist_map->pages);
}
if (hwpm->alist_map) {
tegra_hwpm_kfree(hwpm, hwpm->alist_map);
}
}
void tegra_hwpm_release_mem_mgmt(struct tegra_soc_hwpm *hwpm)
{
tegra_hwpm_kfree(hwpm, hwpm->mem_mgmt);
}