tegra: hwpm: move mem_buf functions to os folder

PMA memory buffer functions use Linux-specific APIs for DMA management.
In an effort to make the HWPM driver OS-agnostic, move the memory buffer
functions to the os/linux path.

Jira THWPM-59

Change-Id: I3dbf577921faed579bbd9de3231b26a9acad28ba
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2738154
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Vasuki Shankar <vasukis@nvidia.com>
Reviewed-by: Seema Khowala <seemaj@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
Vedashree Vidwans
2022-06-29 15:36:56 -07:00
committed by mobile promotions
parent 37dc9132f2
commit 221e73d921
6 changed files with 133 additions and 109 deletions

View File

@@ -15,11 +15,11 @@ obj-y += os/linux/ip_utils.o
obj-y += os/linux/ioctl.o
obj-y += os/linux/kmem.o
obj-y += os/linux/log.o
obj-y += os/linux/mem_mgmt_utils.o
obj-y += common/allowlist.o
obj-y += common/aperture.o
obj-y += common/ip.o
obj-y += common/mem_buf.o
obj-y += common/regops.o
obj-y += common/resource.o
obj-y += common/init.o

View File

@@ -47,7 +47,7 @@ int tegra_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm)
return 0;
}
static int tegra_hwpm_combine_alist(struct tegra_soc_hwpm *hwpm, u64 *alist)
int tegra_hwpm_combine_alist(struct tegra_soc_hwpm *hwpm, u64 *alist)
{
struct tegra_hwpm_func_args func_args;
int err = 0;
@@ -74,90 +74,3 @@ static int tegra_hwpm_combine_alist(struct tegra_soc_hwpm *hwpm, u64 *alist)
return err;
}
/*
 * tegra_hwpm_update_allowlist()
 *
 * Concatenate the allowlists of all active resources directly into the
 * user-supplied buffer (query_allowlist->allowlist):
 *  - pin the user buffer's pages with get_user_pages()
 *  - vmap() the pinned pages into a contiguous kernel VA range
 *  - fill the mapping via tegra_hwpm_combine_alist()
 *
 * @hwpm:         driver context; full_alist_size must already be computed.
 * @ioctl_struct: actually a struct tegra_soc_hwpm_query_allowlist pointer.
 *
 * Returns 0 on success, -EINVAL if the allowlist size is zero, -ENOMEM on
 * allocation/pinning/mapping failure, or the error from
 * tegra_hwpm_combine_alist().
 *
 * NOTE(review): pinned_pages/page_idx are u64 while get_user_pages()
 * returns a signed long; a negative error return would be misread as a
 * huge page count by the cleanup loop, and "%ld" below mismatches u64 —
 * verify and consider making these signed.
 * NOTE(review): the success path returns without vunmap/put_page/kfree —
 * looks like the mapping, the pinned pages and the pages array leak on
 * every successful query; confirm this is not intentional.
 */
int tegra_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm,
void *ioctl_struct)
{
int err = 0;
u64 pinned_pages = 0;
u64 page_idx = 0;
u64 alist_buf_size = 0;
u64 num_pages = 0;
u64 *full_alist_u64 = NULL;
void *full_alist = NULL;
struct page **pages = NULL;
struct tegra_soc_hwpm_query_allowlist *query_allowlist =
(struct tegra_soc_hwpm_query_allowlist *)ioctl_struct;
unsigned long user_va = (unsigned long)(query_allowlist->allowlist);
/* Offset of the user buffer within its first page */
unsigned long offset = user_va & ~PAGE_MASK;
tegra_hwpm_fn(hwpm, " ");
if (hwpm->full_alist_size == 0ULL) {
tegra_hwpm_err(hwpm, "Invalid allowlist size");
return -EINVAL;
}
/* Buffer size in bytes = entry count * bytes per entry */
alist_buf_size = tegra_hwpm_safe_mult_u64(hwpm->full_alist_size,
hwpm->active_chip->get_alist_buf_size(hwpm));
tegra_hwpm_dbg(hwpm, hwpm_info | hwpm_dbg_allowlist,
"alist_buf_size 0x%llx", alist_buf_size);
/* Memory map user buffer into kernel address space */
alist_buf_size = tegra_hwpm_safe_add_u64(offset, alist_buf_size);
/* Round-up and Divide */
alist_buf_size = tegra_hwpm_safe_sub_u64(
tegra_hwpm_safe_add_u64(alist_buf_size, PAGE_SIZE), 1ULL);
num_pages = alist_buf_size / PAGE_SIZE;
pages = tegra_hwpm_kcalloc(hwpm, num_pages, sizeof(*pages));
if (!pages) {
tegra_hwpm_err(hwpm,
"Couldn't allocate memory for pages array");
err = -ENOMEM;
goto alist_unmap;
}
/* Pin the user pages so they cannot be swapped out while mapped */
pinned_pages = get_user_pages(user_va & PAGE_MASK, num_pages, 0,
pages, NULL);
if (pinned_pages != num_pages) {
tegra_hwpm_err(hwpm, "Requested %llu pages / Got %ld pages",
num_pages, pinned_pages);
err = -ENOMEM;
goto alist_unmap;
}
/* Map the pinned pages into one contiguous kernel VA range */
full_alist = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
if (!full_alist) {
tegra_hwpm_err(hwpm, "Couldn't map allowlist buffer into"
" kernel address space");
err = -ENOMEM;
goto alist_unmap;
}
/* Skip to where the user buffer actually starts within the first page */
full_alist_u64 = (u64 *)(full_alist + offset);
err = tegra_hwpm_combine_alist(hwpm, full_alist_u64);
if (err != 0) {
goto alist_unmap;
}
query_allowlist->allowlist_size = hwpm->full_alist_size;
return 0;
alist_unmap:
if (full_alist)
vunmap(full_alist);
if (pinned_pages > 0) {
for (page_idx = 0ULL; page_idx < pinned_pages; page_idx++) {
/* Data was written through the kernel alias; mark dirty */
set_page_dirty(pages[page_idx]);
put_page(pages[page_idx]);
}
}
if (pages) {
tegra_hwpm_kfree(hwpm, pages);
}
return err;
}

View File

@@ -64,8 +64,7 @@ int tegra_hwpm_ip_handle_power_mgmt(struct tegra_soc_hwpm *hwpm,
struct hwpm_ip_inst *ip_inst, bool disable);
int tegra_hwpm_get_allowlist_size(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_update_allowlist(struct tegra_soc_hwpm *hwpm,
void *ioctl_struct);
int tegra_hwpm_combine_alist(struct tegra_soc_hwpm *hwpm, u64 *alist);
int tegra_hwpm_exec_regops(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_exec_reg_ops *exec_reg_ops);
@@ -80,10 +79,4 @@ int tegra_hwpm_get_floorsweep_info(struct tegra_soc_hwpm *hwpm,
int tegra_hwpm_get_resource_info(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_resource_info *rsrc_info);
int tegra_hwpm_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream);
int tegra_hwpm_clear_mem_pipeline(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_update_get_put *update_get_put);
#endif /* TEGRA_HWPM_COMMON_H */

View File

@@ -29,6 +29,7 @@
#include <tegra_hwpm_io.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_common.h>
#include <os/linux/mem_mgmt_utils.h>
#define LA_CLK_RATE 625000000UL
@@ -189,7 +190,7 @@ static int tegra_hwpm_query_allowlist_ioctl(struct tegra_soc_hwpm *hwpm,
query_allowlist->allowlist_size = hwpm->full_alist_size;
} else {
/* Concatenate allowlists and return */
ret = tegra_hwpm_update_allowlist(hwpm, query_allowlist);
ret = tegra_hwpm_map_update_allowlist(hwpm, query_allowlist);
if (ret != 0) {
tegra_hwpm_err(hwpm, "Failed to update full alist");
return ret;

View File

@@ -24,8 +24,10 @@
#include <tegra_hwpm_log.h>
#include <tegra_hwpm.h>
#include <tegra_hwpm_kmem.h>
#include <tegra_hwpm_common.h>
#include <tegra_hwpm_static_analysis.h>
#include <os/linux/mem_mgmt_utils.h>
static int tegra_hwpm_dma_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream)
@@ -50,6 +52,16 @@ static int tegra_hwpm_dma_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
return PTR_ERR(hwpm->stream_sgt);
}
alloc_pma_stream->stream_buf_pma_va =
sg_dma_address(hwpm->stream_sgt->sgl);
if (alloc_pma_stream->stream_buf_pma_va == 0) {
tegra_hwpm_err(hwpm, "Invalid stream buffer SMMU IOVA");
return -ENXIO;
}
tegra_hwpm_dbg(hwpm, hwpm_dbg_alloc_pma_stream,
"stream_buf_pma_va = 0x%llx",
alloc_pma_stream->stream_buf_pma_va);
return 0;
}
@@ -151,17 +163,6 @@ int tegra_hwpm_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
goto fail;
}
alloc_pma_stream->stream_buf_pma_va =
sg_dma_address(hwpm->stream_sgt->sgl);
if (alloc_pma_stream->stream_buf_pma_va == 0) {
tegra_hwpm_err(hwpm, "Invalid stream buffer SMMU IOVA");
ret = -ENXIO;
goto fail;
}
tegra_hwpm_dbg(hwpm, hwpm_dbg_alloc_pma_stream,
"stream_buf_pma_va = 0x%llx",
alloc_pma_stream->stream_buf_pma_va);
/* Memory map mem bytes buffer */
ret = tegra_hwpm_dma_map_mem_bytes_buffer(hwpm, alloc_pma_stream);
if (ret != 0) {
@@ -299,3 +300,90 @@ int tegra_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm,
return 0;
}
int tegra_hwpm_map_update_allowlist(struct tegra_soc_hwpm *hwpm,
void *ioctl_struct)
{
int err = 0;
u64 pinned_pages = 0;
u64 page_idx = 0;
u64 alist_buf_size = 0;
u64 num_pages = 0;
u64 *full_alist_u64 = NULL;
void *full_alist = NULL;
struct page **pages = NULL;
struct tegra_soc_hwpm_query_allowlist *query_allowlist =
(struct tegra_soc_hwpm_query_allowlist *)ioctl_struct;
unsigned long user_va = (unsigned long)(query_allowlist->allowlist);
unsigned long offset = user_va & ~PAGE_MASK;
tegra_hwpm_fn(hwpm, " ");
if (hwpm->full_alist_size == 0ULL) {
tegra_hwpm_err(hwpm, "Invalid allowlist size");
return -EINVAL;
}
alist_buf_size = tegra_hwpm_safe_mult_u64(hwpm->full_alist_size,
hwpm->active_chip->get_alist_buf_size(hwpm));
tegra_hwpm_dbg(hwpm, hwpm_info | hwpm_dbg_allowlist,
"alist_buf_size 0x%llx", alist_buf_size);
/* Memory map user buffer into kernel address space */
alist_buf_size = tegra_hwpm_safe_add_u64(offset, alist_buf_size);
/* Round-up and Divide */
alist_buf_size = tegra_hwpm_safe_sub_u64(
tegra_hwpm_safe_add_u64(alist_buf_size, PAGE_SIZE), 1ULL);
num_pages = alist_buf_size / PAGE_SIZE;
pages = tegra_hwpm_kcalloc(hwpm, num_pages, sizeof(*pages));
if (!pages) {
tegra_hwpm_err(hwpm,
"Couldn't allocate memory for pages array");
err = -ENOMEM;
goto alist_unmap;
}
pinned_pages = get_user_pages(user_va & PAGE_MASK, num_pages, 0,
pages, NULL);
if (pinned_pages != num_pages) {
tegra_hwpm_err(hwpm, "Requested %llu pages / Got %ld pages",
num_pages, pinned_pages);
err = -ENOMEM;
goto alist_unmap;
}
full_alist = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
if (!full_alist) {
tegra_hwpm_err(hwpm, "Couldn't map allowlist buffer into"
" kernel address space");
err = -ENOMEM;
goto alist_unmap;
}
full_alist_u64 = (u64 *)(full_alist + offset);
err = tegra_hwpm_combine_alist(hwpm, full_alist_u64);
if (err != 0) {
goto alist_unmap;
}
query_allowlist->allowlist_size = hwpm->full_alist_size;
return 0;
alist_unmap:
if (full_alist)
vunmap(full_alist);
if (pinned_pages > 0) {
for (page_idx = 0ULL; page_idx < pinned_pages; page_idx++) {
set_page_dirty(pages[page_idx]);
put_page(pages[page_idx]);
}
}
if (pages) {
tegra_hwpm_kfree(hwpm, pages);
}
return err;
}

29
os/linux/mem_mgmt_utils.h Normal file
View File

@@ -0,0 +1,29 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#ifndef TEGRA_HWPM_OS_LINUX_MEM_MGMT_UTILS_H
#define TEGRA_HWPM_OS_LINUX_MEM_MGMT_UTILS_H
struct tegra_soc_hwpm;
struct tegra_soc_hwpm_alloc_pma_stream;
struct tegra_soc_hwpm_update_get_put;
int tegra_hwpm_map_stream_buffer(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_alloc_pma_stream *alloc_pma_stream);
int tegra_hwpm_clear_mem_pipeline(struct tegra_soc_hwpm *hwpm);
int tegra_hwpm_update_mem_bytes(struct tegra_soc_hwpm *hwpm,
struct tegra_soc_hwpm_update_get_put *update_get_put);
int tegra_hwpm_map_update_allowlist(struct tegra_soc_hwpm *hwpm,
void *ioctl_struct);
#endif /* TEGRA_HWPM_OS_LINUX_MEM_MGMT_UTILS_H */