Mirror of git://nv-tegra.nvidia.com/linux-hwpm.git
PMA memory buffer functions use Linux-specific APIs for DMA management.
In an effort to make the HWPM driver OS agnostic, move the memory buffer
functions to the os/linux path.

Jira THWPM-59

Change-Id: I3dbf577921faed579bbd9de3231b26a9acad28ba
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2738154
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Vasuki Shankar <vasukis@nvidia.com>
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
GVS: Gerrit_Virtual_Submit
390 lines
10 KiB
C
/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

/*
 * Linux-specific memory management utilities for the Tegra SOC HWPM
 * driver: dma-buf import and SMMU mapping of the PMA stream and
 * MEM_BYTES buffers, and pinning of the userspace allowlist buffer.
 */

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>	/* msleep() */
#include <linux/of_address.h>
#include <linux/dma-buf.h>
#include <soc/tegra/fuse.h>
#include <uapi/linux/tegra-soc-hwpm-uapi.h>

#include <tegra_hwpm_log.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_kmem.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_static_analysis.h>
#include <os/linux/mem_mgmt_utils.h>
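/*
 * Import the stream buffer dma_buf handed in from userspace via
 * alloc_pma_stream->stream_buf_fd and map it into the device's SMMU
 * address space. On success, the resulting IOVA is reported back to
 * userspace through alloc_pma_stream->stream_buf_pma_va so the PMA
 * unit can be programmed to stream records into the buffer.
 */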
static int tegra_hwpm_dma_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
	struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream)
{
	tegra_hwpm_fn(hwpm, " ");

	hwpm->stream_dma_buf = dma_buf_get(tegra_hwpm_safe_cast_u64_to_s32(
		alloc_pma_stream->stream_buf_fd));
	if (IS_ERR(hwpm->stream_dma_buf)) {
		tegra_hwpm_err(hwpm, "Unable to get stream dma_buf");
		return PTR_ERR(hwpm->stream_dma_buf);
	}
	hwpm->stream_attach = dma_buf_attach(hwpm->stream_dma_buf, hwpm->dev);
	if (IS_ERR(hwpm->stream_attach)) {
		tegra_hwpm_err(hwpm, "Unable to attach stream dma_buf");
		return PTR_ERR(hwpm->stream_attach);
	}
	hwpm->stream_sgt = dma_buf_map_attachment(hwpm->stream_attach,
		DMA_FROM_DEVICE);
	if (IS_ERR(hwpm->stream_sgt)) {
		tegra_hwpm_err(hwpm, "Unable to map stream attachment");
		return PTR_ERR(hwpm->stream_sgt);
	}

	alloc_pma_stream->stream_buf_pma_va =
		sg_dma_address(hwpm->stream_sgt->sgl);
	if (alloc_pma_stream->stream_buf_pma_va == 0) {
		tegra_hwpm_err(hwpm, "Invalid stream buffer SMMU IOVA");
		return -ENXIO;
	}
	tegra_hwpm_dbg(hwpm, hwpm_dbg_alloc_pma_stream,
		"stream_buf_pma_va = 0x%llx",
		alloc_pma_stream->stream_buf_pma_va);

	return 0;
}
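/*
 * Import the MEM_BYTES buffer dma_buf and map it both into the
 * device's SMMU address space and into kernel VA space. The kernel
 * mapping is used to poll the value the hardware streams out; the
 * first 32 bytes are cleared so stale data is never mistaken for a
 * freshly streamed value.
 */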
static int tegra_hwpm_dma_map_mem_bytes_buffer(struct tegra_soc_hwpm *hwpm,
	struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream)
{
	tegra_hwpm_fn(hwpm, " ");

	hwpm->mem_bytes_dma_buf = dma_buf_get(tegra_hwpm_safe_cast_u64_to_s32(
		alloc_pma_stream->mem_bytes_buf_fd));
	if (IS_ERR(hwpm->mem_bytes_dma_buf)) {
		tegra_hwpm_err(hwpm, "Unable to get mem bytes dma_buf");
		return PTR_ERR(hwpm->mem_bytes_dma_buf);
	}

	hwpm->mem_bytes_attach = dma_buf_attach(hwpm->mem_bytes_dma_buf,
		hwpm->dev);
	if (IS_ERR(hwpm->mem_bytes_attach)) {
		tegra_hwpm_err(hwpm, "Unable to attach mem bytes dma_buf");
		return PTR_ERR(hwpm->mem_bytes_attach);
	}

	hwpm->mem_bytes_sgt = dma_buf_map_attachment(hwpm->mem_bytes_attach,
		DMA_FROM_DEVICE);
	if (IS_ERR(hwpm->mem_bytes_sgt)) {
		tegra_hwpm_err(hwpm, "Unable to map mem bytes attachment");
		return PTR_ERR(hwpm->mem_bytes_sgt);
	}

	hwpm->mem_bytes_kernel = dma_buf_vmap(hwpm->mem_bytes_dma_buf);
	if (!hwpm->mem_bytes_kernel) {
		tegra_hwpm_err(hwpm,
			"Unable to map mem_bytes buffer into kernel VA space");
		return -ENOMEM;
	}
	memset(hwpm->mem_bytes_kernel, 0, 32);

	return 0;
}
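/*
 * Tear down all stream and MEM_BYTES buffer state: SMMU mappings,
 * dma_buf attachments and references, and the kernel VA mapping of
 * the MEM_BYTES buffer. Every handle is validated before use, so this
 * is safe to call from partially completed setup paths.
 */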
static int tegra_hwpm_reset_stream_buf(struct tegra_soc_hwpm *hwpm)
{
	tegra_hwpm_fn(hwpm, " ");

	if (hwpm->stream_sgt && (!IS_ERR(hwpm->stream_sgt))) {
		dma_buf_unmap_attachment(hwpm->stream_attach,
			hwpm->stream_sgt,
			DMA_FROM_DEVICE);
	}
	hwpm->stream_sgt = NULL;

	if (hwpm->stream_attach && (!IS_ERR(hwpm->stream_attach))) {
		dma_buf_detach(hwpm->stream_dma_buf, hwpm->stream_attach);
	}
	hwpm->stream_attach = NULL;

	if (hwpm->stream_dma_buf && (!IS_ERR(hwpm->stream_dma_buf))) {
		dma_buf_put(hwpm->stream_dma_buf);
	}
	hwpm->stream_dma_buf = NULL;

	if (hwpm->mem_bytes_kernel) {
		dma_buf_vunmap(hwpm->mem_bytes_dma_buf,
			hwpm->mem_bytes_kernel);
		hwpm->mem_bytes_kernel = NULL;
	}

	if (hwpm->mem_bytes_sgt && (!IS_ERR(hwpm->mem_bytes_sgt))) {
		dma_buf_unmap_attachment(hwpm->mem_bytes_attach,
			hwpm->mem_bytes_sgt,
			DMA_FROM_DEVICE);
	}
	hwpm->mem_bytes_sgt = NULL;

	if (hwpm->mem_bytes_attach && (!IS_ERR(hwpm->mem_bytes_attach))) {
		dma_buf_detach(hwpm->mem_bytes_dma_buf, hwpm->mem_bytes_attach);
	}
	hwpm->mem_bytes_attach = NULL;

	if (hwpm->mem_bytes_dma_buf && (!IS_ERR(hwpm->mem_bytes_dma_buf))) {
		dma_buf_put(hwpm->mem_bytes_dma_buf);
	}
	hwpm->mem_bytes_dma_buf = NULL;

	return 0;
}
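/*
 * Handle a PMA stream buffer allocation request: map the stream and
 * MEM_BYTES buffers, then let the active chip configure its memory
 * management registers. On failure, all partially configured state is
 * rolled back before the error is returned.
 */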
int tegra_hwpm_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
	struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream)
{
	int ret = 0, err = 0;

	tegra_hwpm_fn(hwpm, " ");

	/* Memory map stream buffer */
	ret = tegra_hwpm_dma_map_stream_buffer(hwpm, alloc_pma_stream);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "Failed to map stream buffer");
		goto fail;
	}

	/* Memory map mem bytes buffer */
	ret = tegra_hwpm_dma_map_mem_bytes_buffer(hwpm, alloc_pma_stream);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "Failed to map mem bytes buffer");
		goto fail;
	}

	/* Configure memory management */
	ret = hwpm->active_chip->enable_mem_mgmt(hwpm, alloc_pma_stream);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "Failed to configure stream memory");
		goto fail;
	}

	return 0;

fail:
	/* Invalidate memory config */
	err = hwpm->active_chip->invalidate_mem_config(hwpm);
	if (err != 0) {
		tegra_hwpm_err(hwpm, "Failed to invalidate memory config");
	}

	/* Disable memory management */
	err = hwpm->active_chip->disable_mem_mgmt(hwpm);
	if (err != 0) {
		tegra_hwpm_err(hwpm, "Failed to disable memory management");
	}

	alloc_pma_stream->stream_buf_pma_va = 0;

	/* Reset stream buffer */
	err = tegra_hwpm_reset_stream_buf(hwpm);
	if (err != 0) {
		tegra_hwpm_err(hwpm, "Failed to reset stream buffer");
	}

	return ret;
}
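/*
 * Drain the PMA memory pipeline: repeatedly trigger MEM_BYTES
 * streaming until the hardware publishes a valid value (bounded by a
 * ~1 second timeout), then disable PMA streaming and memory
 * management, and release the buffer mappings.
 */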
int tegra_hwpm_clear_mem_pipeline(struct tegra_soc_hwpm *hwpm)
{
	int ret = 0;

	tegra_hwpm_fn(hwpm, " ");

	/* Stream MEM_BYTES to clear pipeline */
	if (hwpm->mem_bytes_kernel) {
		s32 timeout_msecs = 1000;
		u32 sleep_msecs = 100;
		u32 *mem_bytes_kernel_u32 = (u32 *)(hwpm->mem_bytes_kernel);

		do {
			ret = hwpm->active_chip->stream_mem_bytes(hwpm);
			if (ret != 0) {
				tegra_hwpm_err(hwpm,
					"Trigger mem_bytes streaming failed");
				goto fail;
			}
			msleep(sleep_msecs);
			timeout_msecs -= sleep_msecs;
		} while ((*mem_bytes_kernel_u32 ==
			TEGRA_SOC_HWPM_MEM_BYTES_INVALID) &&
			(timeout_msecs > 0));

		/*
		 * Re-check the streamed value rather than the remaining
		 * time, so a value that lands on the final iteration is
		 * not misreported as a timeout.
		 */
		if (*mem_bytes_kernel_u32 ==
			TEGRA_SOC_HWPM_MEM_BYTES_INVALID) {
			tegra_hwpm_err(hwpm,
				"Timeout expired for MEM_BYTES streaming");
			return -ETIMEDOUT;
		}
	}

	ret = hwpm->active_chip->disable_pma_streaming(hwpm);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "Failed to disable pma streaming");
		goto fail;
	}

	/* Disable memory management */
	ret = hwpm->active_chip->disable_mem_mgmt(hwpm);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "Failed to disable memory management");
		goto fail;
	}

	/* Reset stream buffer */
	ret = tegra_hwpm_reset_stream_buf(hwpm);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "Failed to reset stream buffer");
		goto fail;
	}
fail:
	return ret;
}
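/*
 * Propagate the userspace get pointer to the hardware and, depending
 * on the flags set in update_get_put, trigger MEM_BYTES streaming,
 * read back the hardware put pointer (mem_head), and report the
 * memory buffer overflow status.
 */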
int tegra_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm,
	struct tegra_soc_hwpm_update_get_put *update_get_put)
{
	int ret;

	tegra_hwpm_fn(hwpm, " ");

	/* Update SW get pointer */
	ret = hwpm->active_chip->update_mem_bytes_get_ptr(hwpm,
		update_get_put->mem_bump);
	if (ret != 0) {
		tegra_hwpm_err(hwpm, "Failed to update mem_bytes get ptr");
		return -EINVAL;
	}

	/* Stream MEM_BYTES value to MEM_BYTES buffer */
	if (update_get_put->b_stream_mem_bytes) {
		ret = hwpm->active_chip->stream_mem_bytes(hwpm);
		if (ret != 0) {
			tegra_hwpm_err(hwpm,
				"Failed to trigger mem_bytes streaming");
		}
	}

	/* Read HW put pointer */
	if (update_get_put->b_read_mem_head) {
		update_get_put->mem_head =
			hwpm->active_chip->get_mem_bytes_put_ptr(hwpm);
		tegra_hwpm_dbg(hwpm, hwpm_dbg_update_get_put,
			"MEM_HEAD = 0x%llx", update_get_put->mem_head);
	}

	/* Check overflow error status */
	if (update_get_put->b_check_overflow) {
		update_get_put->b_overflowed =
			(u8) hwpm->active_chip->membuf_overflow_status(hwpm);
		tegra_hwpm_dbg(hwpm, hwpm_dbg_update_get_put, "OVERFLOWED = %u",
			update_get_put->b_overflowed);
	}

	return 0;
}
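/*
 * Pin the userspace allowlist buffer, alias it into kernel VA space
 * with vmap(), and write the combined allowlist of all resources into
 * it. The buffer does not need to be page aligned; the sub-page offset
 * is re-applied to the kernel alias before writing.
 */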
int tegra_hwpm_map_update_allowlist(struct tegra_soc_hwpm *hwpm,
	void *ioctl_struct)
{
	int err = 0;
	long pinned_pages = 0;
	long page_idx = 0;
	u64 alist_buf_size = 0;
	u64 num_pages = 0;
	u64 *full_alist_u64 = NULL;
	void *full_alist = NULL;
	struct page **pages = NULL;
	struct tegra_soc_hwpm_query_allowlist *query_allowlist =
		(struct tegra_soc_hwpm_query_allowlist *)ioctl_struct;
	unsigned long user_va = (unsigned long)(query_allowlist->allowlist);
	unsigned long offset = user_va & ~PAGE_MASK;

	tegra_hwpm_fn(hwpm, " ");

	if (hwpm->full_alist_size == 0ULL) {
		tegra_hwpm_err(hwpm, "Invalid allowlist size");
		return -EINVAL;
	}

	alist_buf_size = tegra_hwpm_safe_mult_u64(hwpm->full_alist_size,
		hwpm->active_chip->get_alist_buf_size(hwpm));

	tegra_hwpm_dbg(hwpm, hwpm_info | hwpm_dbg_allowlist,
		"alist_buf_size 0x%llx", alist_buf_size);

	/* Memory map user buffer into kernel address space */
	alist_buf_size = tegra_hwpm_safe_add_u64(offset, alist_buf_size);

	/* Round up to page granularity and divide */
	alist_buf_size = tegra_hwpm_safe_sub_u64(
		tegra_hwpm_safe_add_u64(alist_buf_size, PAGE_SIZE), 1ULL);
	num_pages = alist_buf_size / PAGE_SIZE;

	pages = tegra_hwpm_kcalloc(hwpm, num_pages, sizeof(*pages));
	if (!pages) {
		tegra_hwpm_err(hwpm,
			"Couldn't allocate memory for pages array");
		err = -ENOMEM;
		goto alist_unmap;
	}

	/*
	 * pinned_pages is signed so a negative error return from
	 * get_user_pages() is not misread as a huge page count.
	 */
	pinned_pages = get_user_pages(user_va & PAGE_MASK, num_pages, 0,
		pages, NULL);
	if (pinned_pages < 0 || (u64)pinned_pages != num_pages) {
		tegra_hwpm_err(hwpm, "Requested %llu pages / Got %ld pages",
			num_pages, pinned_pages);
		err = -ENOMEM;
		goto alist_unmap;
	}

	full_alist = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
	if (!full_alist) {
		tegra_hwpm_err(hwpm,
			"Couldn't map allowlist buffer into kernel address space");
		err = -ENOMEM;
		goto alist_unmap;
	}
	full_alist_u64 = (u64 *)(full_alist + offset);

	err = tegra_hwpm_combine_alist(hwpm, full_alist_u64);
	if (err != 0) {
		goto alist_unmap;
	}

	query_allowlist->allowlist_size = hwpm->full_alist_size;

	/*
	 * Fall through on success as well: the combined allowlist has
	 * already been written to the pinned user pages, so the temporary
	 * kernel mapping, the page pins, and the pages array are no longer
	 * needed and can be released before returning.
	 */
alist_unmap:
	if (full_alist) {
		vunmap(full_alist);
	}
	if (pinned_pages > 0) {
		for (page_idx = 0; page_idx < pinned_pages; page_idx++) {
			set_page_dirty(pages[page_idx]);
			put_page(pages[page_idx]);
		}
	}
	if (pages) {
		tegra_hwpm_kfree(hwpm, pages);
	}

	return err;
}