misc: mods: update from Perforce

Change-Id: I12e626024579fa86ab2f79068bb57e9f41746e8a
Signed-off-by: Ian Grissom <igrissom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2989905
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Reviewed-by: Chris Dragan <kdragan@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
This commit is contained in:
Ian Grissom
2023-12-29 18:27:36 +00:00
committed by mobile promotions
parent 288de33b62
commit 426eaddac9
6 changed files with 315 additions and 164 deletions

View File

@@ -146,12 +146,13 @@ struct MODS_MEM_INFO {
*/ */
struct list_head dma_map_list; struct list_head dma_map_list;
u32 num_pages; /* total number of allocated pages */ u32 num_pages; /* total number of allocated pages */
u32 num_chunks; /* number of allocated contig chunks */ u32 num_chunks; /* number of allocated contig chunks */
int numa_node; /* numa node for the allocation */ int numa_node; /* numa node for the allocation */
u8 cache_type : 2; /* MODS_ALLOC_* */ u8 cache_type : 2; /* MODS_ALLOC_* */
u8 dma32 : 1; /* true/false */ u8 dma32 : 1; /* true/false */
u8 force_numa : 1; /* true/false */ u8 force_numa : 1; /* true/false */
u8 reservation_tag; /* zero if not reserved */
struct pci_dev *dev; /* (optional) pci_dev this allocation struct pci_dev *dev; /* (optional) pci_dev this allocation
* is for. * is for.
@@ -380,6 +381,7 @@ const char *mods_get_prot_str(u8 mem_type);
int mods_unregister_all_alloc(struct mods_client *client); int mods_unregister_all_alloc(struct mods_client *client);
struct MODS_MEM_INFO *mods_find_alloc(struct mods_client *client, struct MODS_MEM_INFO *mods_find_alloc(struct mods_client *client,
u64 phys_addr); u64 phys_addr);
void mods_free_mem_reservations(void);
#if defined(CONFIG_PPC64) #if defined(CONFIG_PPC64)
/* ppc64 */ /* ppc64 */
@@ -453,6 +455,12 @@ int esc_mods_iommu_dma_map_memory(struct mods_client *client,
struct MODS_IOMMU_DMA_MAP_MEMORY *p); struct MODS_IOMMU_DMA_MAP_MEMORY *p);
int esc_mods_iommu_dma_unmap_memory(struct mods_client *client, int esc_mods_iommu_dma_unmap_memory(struct mods_client *client,
struct MODS_IOMMU_DMA_MAP_MEMORY *p); struct MODS_IOMMU_DMA_MAP_MEMORY *p);
int esc_mods_reserve_allocation(struct mods_client *client,
struct MODS_RESERVE_ALLOCATION *p);
int esc_mods_get_reserved_allocation(struct mods_client *client,
struct MODS_RESERVE_ALLOCATION *p);
int esc_mods_release_reserved_allocation(struct mods_client *client,
struct MODS_RESERVE_ALLOCATION *p);
#ifdef CONFIG_ARM #ifdef CONFIG_ARM
int esc_mods_memory_barrier(struct mods_client *client); int esc_mods_memory_barrier(struct mods_client *client);

View File

@@ -596,6 +596,7 @@ static void __exit mods_exit_module(void)
#if defined(MODS_HAS_ARM_FFA) #if defined(MODS_HAS_ARM_FFA)
mods_ffa_abi_unregister(); mods_ffa_abi_unregister();
#endif #endif
mods_free_mem_reservations();
mods_info_printk("driver unloaded\n"); mods_info_printk("driver unloaded\n");
LOG_EXT(); LOG_EXT();
} }
@@ -2286,6 +2287,24 @@ static long mods_krnl_ioctl(struct file *fp,
esc_mods_merge_pages, MODS_MERGE_PAGES); esc_mods_merge_pages, MODS_MERGE_PAGES);
break; break;
case MODS_ESC_RESERVE_ALLOCATION:
MODS_IOCTL(MODS_ESC_RESERVE_ALLOCATION,
esc_mods_reserve_allocation,
MODS_RESERVE_ALLOCATION);
break;
case MODS_ESC_GET_RESERVED_ALLOCATION:
MODS_IOCTL(MODS_ESC_GET_RESERVED_ALLOCATION,
esc_mods_get_reserved_allocation,
MODS_RESERVE_ALLOCATION);
break;
case MODS_ESC_RELEASE_RESERVED_ALLOCATION:
MODS_IOCTL(MODS_ESC_RELEASE_RESERVED_ALLOCATION,
esc_mods_release_reserved_allocation,
MODS_RESERVE_ALLOCATION);
break;
case MODS_ESC_GET_PHYSICAL_ADDRESS: case MODS_ESC_GET_PHYSICAL_ADDRESS:
MODS_IOCTL(MODS_ESC_GET_PHYSICAL_ADDRESS, MODS_IOCTL(MODS_ESC_GET_PHYSICAL_ADDRESS,
esc_mods_get_phys_addr, esc_mods_get_phys_addr,
@@ -2780,7 +2799,6 @@ static long mods_krnl_ioctl(struct file *fp,
MODS_IOCTL(MODS_ESC_MODS_SEND_IPI, MODS_IOCTL(MODS_ESC_MODS_SEND_IPI,
esc_mods_send_ipi, MODS_SEND_IPI); esc_mods_send_ipi, MODS_SEND_IPI);
break; break;
#endif #endif
case MODS_ESC_FFA_CMD: case MODS_ESC_FFA_CMD:

View File

@@ -4,6 +4,7 @@
#include "mods_internal.h" #include "mods_internal.h"
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/sched.h> #include <linux/sched.h>
@@ -16,6 +17,16 @@
#include <linux/cache.h> #include <linux/cache.h>
#endif #endif
#define MODS_MEM_MAX_RESERVATIONS 16

/* Structure used by this module to track existing reservations.
 * A slot is free when p_mem_info is NULL; a reserved slot is unclaimed
 * when client_id is 0.  Slots are indexed by (tag - 1).
 */
struct MODS_MEM_RESERVATION {
	struct MODS_MEM_INFO *p_mem_info; /* reserved allocation, NULL if slot free */
	u8                    client_id;  /* owning client, 0 if unclaimed */
};

static struct MODS_MEM_RESERVATION mem_reservations[MODS_MEM_MAX_RESERVATIONS];

/* Protects mem_reservations[] and the client_id/reservation_tag pairing.
 * static: the mutex is only used within this translation unit.
 */
static DEFINE_MUTEX(mem_reservation_mtx);
static struct MODS_MEM_INFO *get_mem_handle(struct mods_client *client, static struct MODS_MEM_INFO *get_mem_handle(struct mods_client *client,
u64 handle) u64 handle)
{ {
@@ -24,8 +35,7 @@ static struct MODS_MEM_INFO *get_mem_handle(struct mods_client *client,
* accounting. * accounting.
*/ */
if (unlikely((handle + PAGE_SIZE) < (2 * PAGE_SIZE))) { if (unlikely((handle + PAGE_SIZE) < (2 * PAGE_SIZE))) {
cl_error("invalid memory handle 0x%llx\n", cl_error("invalid memory handle 0x%llx\n", (unsigned long long)handle);
(unsigned long long)handle);
return NULL; return NULL;
} }
@@ -42,8 +52,7 @@ static bool validate_mem_handle(struct mods_client *client,
return false; return false;
list_for_each(iter, head) { list_for_each(iter, head) {
struct MODS_MEM_INFO *p_mem = struct MODS_MEM_INFO *p_mem = list_entry(iter, struct MODS_MEM_INFO, list);
list_entry(iter, struct MODS_MEM_INFO, list);
if (p_mem == p_mem_info) if (p_mem == p_mem_info)
return true; return true;
@@ -957,14 +966,22 @@ static int unregister_and_free_alloc(struct mods_client *client,
if (likely(p_mem_info)) { if (likely(p_mem_info)) {
dma_unmap_all(client, p_mem_info, NULL); dma_unmap_all(client, p_mem_info, NULL);
save_non_wb_chunks(client, p_mem_info); if (likely(!p_mem_info->reservation_tag)) {
release_chunks(client, p_mem_info); save_non_wb_chunks(client, p_mem_info);
release_chunks(client, p_mem_info);
pci_dev_put(p_mem_info->dev); pci_dev_put(p_mem_info->dev);
kfree(p_mem_info);
atomic_dec(&client->num_allocs);
kfree(p_mem_info);
} else {
/* Decrement client num_pages manually if not releasing chunks */
atomic_sub((int)p_mem_info->num_pages, &client->num_pages);
mutex_lock(&mem_reservation_mtx);
/* Clear the client_id in the associated reservation */
mem_reservations[p_mem_info->reservation_tag-1].client_id = 0;
mutex_unlock(&mem_reservation_mtx);
}
atomic_dec(&client->num_allocs); /* always decrement to avoid leak */
err = OK; err = OK;
} else { } else {
cl_error("failed to unregister allocation %p\n", p_del_mem); cl_error("failed to unregister allocation %p\n", p_del_mem);
@@ -1384,16 +1401,17 @@ int esc_mods_alloc_pages_2(struct mods_client *client,
init_mem_info(p_mem_info, num_chunks, cache_type); init_mem_info(p_mem_info, num_chunks, cache_type);
p_mem_info->num_pages = num_pages; p_mem_info->num_pages = num_pages;
p_mem_info->dma32 = (p->flags & MODS_ALLOC_DMA32) ? true : false; p_mem_info->dma32 = (p->flags & MODS_ALLOC_DMA32) ? true : false;
p_mem_info->force_numa = (p->flags & MODS_ALLOC_FORCE_NUMA) p_mem_info->force_numa = (p->flags & MODS_ALLOC_FORCE_NUMA)
? true : false; ? true : false;
p_mem_info->reservation_tag = 0;
#ifdef MODS_HASNT_NUMA_NO_NODE #ifdef MODS_HASNT_NUMA_NO_NODE
p_mem_info->numa_node = numa_node_id(); p_mem_info->numa_node = numa_node_id();
#else #else
p_mem_info->numa_node = NUMA_NO_NODE; p_mem_info->numa_node = NUMA_NO_NODE;
#endif #endif
p_mem_info->dev = NULL; p_mem_info->dev = NULL;
if ((p->flags & MODS_ALLOC_USE_NUMA) && if ((p->flags & MODS_ALLOC_USE_NUMA) &&
p->numa_node != MODS_ANY_NUMA_NODE) p->numa_node != MODS_ANY_NUMA_NODE)
@@ -2450,6 +2468,190 @@ failed:
} }
#endif /* MODS_HAS_TEGRA */ #endif /* MODS_HAS_TEGRA */
/* Reserve the allocation identified by p->memory_handle under the
 * caller-chosen tag p->tag (valid range 1..MODS_MEM_MAX_RESERVATIONS).
 *
 * The handle must belong to the calling client's mem_alloc_list.  On
 * success the allocation's reservation_tag is set (so freeing the handle
 * releases the claim instead of the pages — see unregister_and_free_alloc)
 * and the global reservation slot records this client as owner.
 *
 * Returns 0 on success, -EINVAL for a bad tag or handle, -ENOMEM if the
 * tag is already in use, or the error from an interrupted mutex wait.
 * NOTE(review): -EEXIST would be the conventional errno for an
 * already-used tag — confirm userspace does not depend on -ENOMEM.
 */
int esc_mods_reserve_allocation(struct mods_client *client,
				struct MODS_RESERVE_ALLOCATION *p)
{
	struct MODS_MEM_INFO *p_mem_info;
	struct MODS_MEM_INFO *p_existing_mem_info = NULL;
	struct MODS_MEM_RESERVATION *p_reservation = NULL;
	struct list_head *head = &client->mem_alloc_list;
	struct list_head *iter;
	int err = -EINVAL;
	LOG_ENT();
	/* Tags are 1-based indices into mem_reservations[] */
	if (!(p->tag) || (p->tag > MODS_MEM_MAX_RESERVATIONS)) {
		cl_error("invalid tag 0x%llx for memory reservations\n",
			 (unsigned long long)p->tag);
		LOG_EXT();
		return -EINVAL;
	}
	/* Get passed mem_info */
	p_mem_info = get_mem_handle(client, p->memory_handle);
	if (unlikely(!p_mem_info)) {
		cl_error("failed to get memory handle\n");
		LOG_EXT();
		return -EINVAL;
	}
	/* Lock mutexes — reservation table first, then the client.  The
	 * same order must be used everywhere to avoid an ABBA deadlock.
	 */
	err = mutex_lock_interruptible(&mem_reservation_mtx);
	if (unlikely(err)) {
		LOG_EXT();
		return err;
	}
	err = mutex_lock_interruptible(&client->mtx);
	if (unlikely(err)) {
		mutex_unlock(&mem_reservation_mtx);
		LOG_EXT();
		return err;
	}
	/* Check for existing reservation */
	p_reservation = &mem_reservations[p->tag - 1];
	if (unlikely(p_reservation->p_mem_info)) {
		cl_error("reservation 0x%llX already exists\n",
			 (unsigned long long)p->tag);
		err = -ENOMEM;
		goto failed;
	}
	/* Find existing handle in client and mark as reserved.  The walk
	 * guards against a stale/foreign handle being passed in.
	 */
	list_for_each(iter, head) {
		p_existing_mem_info = list_entry(iter, struct MODS_MEM_INFO, list);
		if (p_existing_mem_info == p_mem_info)
			break;
		p_existing_mem_info = NULL;
	}
	if (unlikely(!p_existing_mem_info)) {
		cl_error("failed to find mem info requested by reservation\n");
		err = -EINVAL;
		goto failed;
	}
	p_existing_mem_info->reservation_tag = p->tag; /* Set tag to avoid free */
	/* Add memory handle to new reservation; reserving client owns it */
	p_reservation->p_mem_info = p_existing_mem_info;
	p_reservation->client_id = client->client_id;
failed:
	mutex_unlock(&client->mtx);
	mutex_unlock(&mem_reservation_mtx);
	LOG_EXT();
	return err;
}
/* Claim the reservation identified by p->tag and return its memory handle
 * in p->memory_handle.
 *
 * If the reservation is unclaimed, the calling client becomes its owner:
 * the allocation is registered on the client's list and the client's
 * num_allocs/num_pages accounting is bumped to match.  Re-claiming a
 * reservation this client already owns just returns the handle again.
 *
 * Returns 0 on success, -EINVAL for a bad tag or empty slot, -EBUSY if
 * another client owns the reservation, or the error from an interrupted
 * mutex wait.  On every failure p->memory_handle is set to 0.
 */
int esc_mods_get_reserved_allocation(struct mods_client *client,
				     struct MODS_RESERVE_ALLOCATION *p)
{
	struct MODS_MEM_RESERVATION *p_reservation = NULL;
	int err = -EINVAL;
	LOG_ENT();
	/* Tags are 1-based indices into mem_reservations[] */
	if (!(p->tag) || (p->tag > MODS_MEM_MAX_RESERVATIONS)) {
		cl_error("invalid tag 0x%llx for memory reservations\n",
			 (unsigned long long)p->tag);
		LOG_EXT();
		return -EINVAL;
	}
	err = mutex_lock_interruptible(&mem_reservation_mtx);
	if (unlikely(err)) {
		LOG_EXT();
		return err;
	}
	/* Locate existing reservation */
	p_reservation = &mem_reservations[p->tag - 1];
	if (unlikely(!p_reservation->p_mem_info)) {
		cl_error("no mem reservation for tag 0x%llX\n",
			 (unsigned long long)p->tag);
		p->memory_handle = 0;
		err = -EINVAL;
		goto failed;
	}
	/* Owned by a different client — cannot hand the handle out */
	if ((p_reservation->client_id != client->client_id) &&
	    (p_reservation->client_id)) {
		cl_error("reservation 0x%llX is claimed by client_id %d\n",
			 (unsigned long long)p->tag, p_reservation->client_id);
		err = -EBUSY;
		p->memory_handle = 0;
		goto failed;
	}
	/* Claim reservation and return handle.  The branch is skipped when
	 * this client already owns the slot, so accounting is not double
	 * counted.
	 * NOTE(review): register_alloc()'s return value is ignored here —
	 * confirm it cannot fail, otherwise the accounting below would be
	 * wrong on failure.
	 */
	if (p_reservation->client_id != client->client_id) {
		p_reservation->client_id = client->client_id;
		register_alloc(client, p_reservation->p_mem_info);
		atomic_inc(&client->num_allocs); /* Increment allocations */
		atomic_add((int)p_reservation->p_mem_info->num_pages,
			   &client->num_pages); /* Increment pages */
	}
	p->memory_handle = (u64)(size_t)p_reservation->p_mem_info;
failed:
	mutex_unlock(&mem_reservation_mtx);
	LOG_EXT();
	return err;
}
/* Release the reservation identified by p->tag.
 *
 * Only the owning client may release a reservation.  Releasing clears the
 * allocation's reservation_tag (so a subsequent MODS_ESC_FREE_PAGES or
 * client close actually frees the pages) and empties the reservation slot.
 * p->memory_handle is unused by this ioctl.
 *
 * Returns 0 on success, -EINVAL for a bad tag, an empty slot or an
 * unclaimed reservation, -EBUSY if another client owns it, or the error
 * from an interrupted mutex wait.
 */
int esc_mods_release_reserved_allocation(struct mods_client *client,
					 struct MODS_RESERVE_ALLOCATION *p)
{
	struct MODS_MEM_RESERVATION *p_reservation = NULL;
	int err = -EINVAL;
	LOG_ENT();
	/* Tags are 1-based indices into mem_reservations[] */
	if (!(p->tag) || (p->tag > MODS_MEM_MAX_RESERVATIONS)) {
		cl_error("invalid tag 0x%llx for memory reservations\n",
			 (unsigned long long)p->tag);
		LOG_EXT();
		return -EINVAL;
	}
	err = mutex_lock_interruptible(&mem_reservation_mtx);
	if (unlikely(err)) {
		LOG_EXT();
		return err;
	}
	/* Locate existing reservation */
	p_reservation = &mem_reservations[p->tag - 1];
	if (unlikely(!p_reservation->p_mem_info)) {
		cl_error("no mem reservation for tag 0x%llX\n",
			 (unsigned long long)p->tag);
		err = -EINVAL;
		goto failed;
	}
	/* Fix: the two error messages below were swapped */
	if (!p_reservation->client_id) {
		cl_error("reservation with tag 0x%llX not claimed by any client\n",
			 (unsigned long long)p->tag);
		err = -EINVAL;
		goto failed;
	}
	if (p_reservation->client_id != client->client_id) {
		cl_error("Reservation with tag 0x%llX not claimed by calling client id\n",
			 (unsigned long long)p->tag);
		err = -EBUSY;
		goto failed;
	}
	/* p_mem_info was verified non-NULL above and the mutex is still
	 * held, so clear the tag and the slot unconditionally.
	 */
	p_reservation->p_mem_info->reservation_tag = 0;
	memset(p_reservation, 0, sizeof(*p_reservation));
failed:
	mutex_unlock(&mem_reservation_mtx);
	LOG_EXT();
	return err;
}
#ifdef CONFIG_ARM64 #ifdef CONFIG_ARM64
static void clear_contiguous_cache(struct mods_client *client, static void clear_contiguous_cache(struct mods_client *client,
u64 virt_start, u64 virt_start,
@@ -2553,3 +2755,28 @@ int esc_mods_flush_cpu_cache_range(struct mods_client *client,
return err; return err;
} }
#endif /* CONFIG_ARM64 */ #endif /* CONFIG_ARM64 */
/***************************
 * RESERVATION INIT / EXIT *
 ***************************/

/* Module-exit cleanup: free every allocation still held by the
 * reservation table and clear the slots.
 *
 * Called from mods_exit_module(), after all clients are gone, so the
 * reservation mutex is intentionally not taken — there can be no
 * concurrent ioctl.
 *
 * NOTE(review): a zeroed stack client is passed to release_chunks() /
 * the cl_* logging macros purely so they have a valid pointer to work
 * with; confirm release_chunks() tolerates a client with no state.
 * Also note the loop frees claimed and unclaimed reservations alike,
 * despite the comment's wording.
 */
void mods_free_mem_reservations(void)
{
	int i;
	struct mods_client client;
	/* Dummy client used to ensure ensuing functions do not crash */
	memset(&client, 0, sizeof(client));
	/* Clear reserved on claimed reservations and free unclaimed ones */
	for (i = 0; i < MODS_MEM_MAX_RESERVATIONS; i++) {
		struct MODS_MEM_RESERVATION *p_reservation = &mem_reservations[i];
		/* Existing reservation */
		if (p_reservation->p_mem_info) {
			release_chunks(&client, p_reservation->p_mem_info);
			pci_dev_put(p_reservation->p_mem_info->dev);
			kfree(p_reservation->p_mem_info);
			memset(p_reservation, 0, sizeof(*p_reservation));
		}
	}
}

View File

@@ -1121,7 +1121,7 @@ int esc_mods_read_dev_property(struct mods_client *client,
err = device_property_read_u64_array(&dev->dev, p->prop_name, err = device_property_read_u64_array(&dev->dev, p->prop_name,
(u64 *)p->output, p->array_size); (u64 *)p->output, p->array_size);
if (unlikely(err)) if (unlikely(err))
cl_error("failed to read property %s\n", p->prop_name); cl_info("failed to read property %s\n", p->prop_name);
error: error:
pci_dev_put(dev); pci_dev_put(dev);

View File

@@ -1,137 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014-2023, NVIDIA CORPORATION. All rights reserved. */
#include <linux/uaccess.h>
#include <../drivers/video/tegra/dc/dc_priv.h>
#include <video/tegra_dc_ext_kernel.h>
#include <linux/platform/tegra/mc.h>
#include "mods_internal.h"
/* Copy the basic attributes of a MODS window description into a display
 * controller window (tegra_dc_win).
 *
 * Always marks the window enabled; TILED and SCAN_COLUMN flags are
 * translated from the MODS flag bits.  Alpha, depth and strides are
 * reset to zero.  The x/y/w/h inputs are stored into fixed-point
 * ".full" fields — presumably they are already in the fixed-point
 * encoding dfixed_trunc() expects; confirm against the caller.
 */
static void mods_tegra_dc_set_windowattr_basic(
	struct mods_client *client,
	struct tegra_dc_win *win,
	const struct MODS_TEGRA_DC_WINDOW *mods_win)
{
	win->global_alpha = 0;
	win->z = 0;
	win->stride = 0;
	win->stride_uv = 0;
	/* Window is unconditionally enabled; optional flags added below */
	win->flags = TEGRA_WIN_FLAG_ENABLED;
	if (mods_win->flags & MODS_TEGRA_DC_WINDOW_FLAG_TILED)
		win->flags |= TEGRA_WIN_FLAG_TILED;
	if (mods_win->flags & MODS_TEGRA_DC_WINDOW_FLAG_SCAN_COL)
		win->flags |= TEGRA_WIN_FLAG_SCAN_COLUMN;
	win->fmt = mods_win->pixformat;
	win->x.full = mods_win->x;
	win->y.full = mods_win->y;
	win->w.full = mods_win->w;
	win->h.full = mods_win->h;
	/* XXX verify that this doesn't go outside display's active region */
	win->out_x = mods_win->out_x;
	win->out_y = mods_win->out_y;
	win->out_w = mods_win->out_w;
	win->out_h = mods_win->out_h;
	cl_debug(DEBUG_TEGRADC,
		 "set_windowattr_basic window %u:\n"
		 "\tflags : 0x%08x\n"
		 "\tfmt : %u\n"
		 "\tinput : (%u, %u, %u, %u)\n"
		 "\toutput: (%u, %u, %u, %u)\n",
		 win->idx, win->flags, win->fmt, dfixed_trunc(win->x),
		 dfixed_trunc(win->y), dfixed_trunc(win->w),
		 dfixed_trunc(win->h), win->out_x, win->out_y, win->out_w,
		 win->out_h);
}
int esc_mods_tegra_dc_config_possible(struct mods_client *client,
struct MODS_TEGRA_DC_CONFIG_POSSIBLE *args)
{
int i;
struct tegra_dc *dc = tegra_dc_get_dc(args->head);
struct tegra_dc_win *dc_wins[DC_N_WINDOWS];
#ifndef CONFIG_TEGRA_ISOMGR
struct clk *emc_clk = 0;
unsigned long max_bandwidth = 0;
unsigned long current_emc_freq = 0;
unsigned long max_available_bandwidth = 0;
#else
int ret = -EINVAL;
#endif
LOG_ENT();
BUG_ON(args->win_num > tegra_dc_get_numof_dispwindows());
if (!dc) {
LOG_EXT();
return -EINVAL;
}
for (i = 0; i < args->win_num; i++) {
unsigned int idx;
if (args->windows[i].index < 0) {
cl_debug(DEBUG_TEGRADC,
"invalid index %d for win %d\n",
i, args->windows[i].index);
return -EINVAL;
}
idx = (unsigned int)args->windows[i].index;
if (args->windows[i].flags &
MODS_TEGRA_DC_WINDOW_FLAG_ENABLED) {
mods_tegra_dc_set_windowattr_basic(client,
&dc->tmp_wins[idx],
&args->windows[i]);
} else {
dc->tmp_wins[idx].flags = 0;
}
dc_wins[i] = &dc->tmp_wins[idx];
cl_debug(DEBUG_TEGRADC,
"head %u, using index %d for win %d\n",
args->head, i, idx);
}
cl_debug(DEBUG_TEGRADC,
"head %u, dc->mode.pclk %u\n",
args->head, dc->mode.pclk);
#ifndef CONFIG_TEGRA_ISOMGR
max_bandwidth = tegra_dc_get_bandwidth(dc_wins, args->win_num);
emc_clk = clk_get_sys("tegra_emc", "emc");
if (IS_ERR(emc_clk)) {
cl_debug(DEBUG_TEGRADC,
"invalid clock specified when fetching EMC clock\n");
} else {
current_emc_freq = clk_get_rate(emc_clk);
current_emc_freq /= 1000;
max_available_bandwidth =
8 * tegra_emc_freq_req_to_bw(current_emc_freq);
max_available_bandwidth = (max_available_bandwidth / 100) * 50;
}
cl_debug(DEBUG_TEGRADC,
"b/w needed %lu, b/w available %lu\n",
max_bandwidth, max_available_bandwidth);
args->possible = (max_bandwidth <= max_available_bandwidth);
#else
ret = tegra_dc_bandwidth_negotiate_bw(dc, dc_wins, args->win_num);
args->possible = (ret == 0);
#endif
for (i = 0; i < args->win_num; i++) {
args->windows[i].bandwidth = dc_wins[i]->new_bandwidth;
cl_debug(DEBUG_TEGRADC,
"head %u, win %d, b/w %d\n",
args->head,
dc_wins[i]->idx,
dc_wins[i]->new_bandwidth);
}
LOG_EXT();
return 0;
}

View File

@@ -8,7 +8,7 @@
/* Driver version */ /* Driver version */
#define MODS_DRIVER_VERSION_MAJOR 4 #define MODS_DRIVER_VERSION_MAJOR 4
#define MODS_DRIVER_VERSION_MINOR 22 #define MODS_DRIVER_VERSION_MINOR 23
#define MODS_DRIVER_VERSION ((MODS_DRIVER_VERSION_MAJOR << 8) | \ #define MODS_DRIVER_VERSION ((MODS_DRIVER_VERSION_MAJOR << 8) | \
((MODS_DRIVER_VERSION_MINOR / 10) << 4) | \ ((MODS_DRIVER_VERSION_MINOR / 10) << 4) | \
(MODS_DRIVER_VERSION_MINOR % 10)) (MODS_DRIVER_VERSION_MINOR % 10))
@@ -1947,6 +1947,38 @@ struct MODS_IDLE {
__u32 num_loops; __u32 num_loops;
}; };
/* Used by MODS_ESC_RESERVE_ALLOCATION, MODS_ESC_GET_RESERVED_ALLOCATION,
* and MODS_ESC_RELEASE_RESERVED_ALLOCATION ioctls.
*
* MODS_ESC_RESERVE_ALLOCATION permits the reservation of a memory allocation
* specified by 'memory_handle' with the tag 'tag'. The 'tag' can take on
* values between 1 and MODS_MEM_MAX_RESERVATIONS and is used to index
* reservations.
*
* MODS_ESC_GET_RESERVED_ALLOCATION is used to claim ownership of a reservation
* specified by 'tag'. If an unclaimed reservation is found using 'tag',
* 'memory_handle' will be populated with the allocation handle.
*
* MODS_ESC_RELEASE_RESERVED_ALLOCATION is used to completely free and stop the
* usage of a memory reservation made through MODS_ESC_RESERVE_ALLOCATION or
* obtained through MODS_ESC_GET_RESERVED_ALLOCATION. The 'tag' is accepted as
* an input to identify the reservation to release. For this ioctl,
* 'memory_handle' is unused.
*
* Limitations include:
* - Only one client may own a reservation at any given time
* - The client reserving the allocation automatically owns the reservation
* - There are two ways to "unclaim" a reservation without freeing the memory:
* 1. Calling MODS_ESC_FREE_PAGES with the associated handle
* 2. Closing a client
*/
struct MODS_RESERVE_ALLOCATION {
	/* IN: reservation slot, 1..MODS_MEM_MAX_RESERVATIONS (never 0) */
	__u64 tag;
	/* IN/OUT: allocation handle; input for RESERVE, output for GET,
	 * unused for RELEASE
	 */
	__u64 memory_handle;
};
#pragma pack(pop) #pragma pack(pop)
#define MODS_IOC_MAGIC 'x' #define MODS_IOC_MAGIC 'x'
@@ -2157,12 +2189,15 @@ struct MODS_IDLE {
#define MODS_ESC_SEND_TZ_MSG MODSIO(WR, 139, MODS_TZ_PARAMS) #define MODS_ESC_SEND_TZ_MSG MODSIO(WR, 139, MODS_TZ_PARAMS)
#define MODS_ESC_OIST_STATUS MODSIO(WR, 140, MODS_TEGRA_OIST_STATUS) #define MODS_ESC_OIST_STATUS MODSIO(WR, 140, MODS_TEGRA_OIST_STATUS)
#define MODS_ESC_INVOKE_OPTEE_TA MODSIO(WR, 141, MODS_OPTEE_PARAMS) #define MODS_ESC_INVOKE_OPTEE_TA MODSIO(WR, 141, MODS_OPTEE_PARAMS)
#define MODS_ESC_READ_DEV_PROPERTY MODSIO(WR, 142, MODS_READ_DEV_PROPERTY) #define MODS_ESC_READ_DEV_PROPERTY MODSIO(WR_BAD, 142, MODS_READ_DEV_PROPERTY)
#define MODS_ESC_PROXIMITY_TO_NUMA_NODE MODSIO(WR, 143, MODS_PROXIMITY_TO_NUMA_NODE) #define MODS_ESC_PROXIMITY_TO_NUMA_NODE MODSIO(WR, 143, MODS_PROXIMITY_TO_NUMA_NODE)
#define MODS_ESC_MODS_SEND_IPI MODSIO(W, 144, MODS_SEND_IPI) #define MODS_ESC_MODS_SEND_IPI MODSIO(W, 144, MODS_SEND_IPI)
#define MODS_ESC_FFA_CMD MODSIO(WR, 145, MODS_FFA_PARAMS) #define MODS_ESC_FFA_CMD MODSIO(WR, 145, MODS_FFA_PARAMS)
#define MODS_ESC_BPMP_UPHY_LANE_EOM_SCAN MODSIO(WR, 146, \ #define MODS_ESC_BPMP_UPHY_LANE_EOM_SCAN MODSIO(WR, 146, \
MODS_BPMP_UPHY_LANE_EOM_SCAN_PARAMS) MODS_BPMP_UPHY_LANE_EOM_SCAN_PARAMS)
#define MODS_ESC_IDLE MODSIO(W, 147, MODS_IDLE) #define MODS_ESC_IDLE MODSIO(W, 147, MODS_IDLE)
#define MODS_ESC_RESERVE_ALLOCATION MODSIO(W, 148, MODS_RESERVE_ALLOCATION)
#define MODS_ESC_GET_RESERVED_ALLOCATION MODSIO(WR, 149, MODS_RESERVE_ALLOCATION)
#define MODS_ESC_RELEASE_RESERVED_ALLOCATION MODSIO(W, 150, MODS_RESERVE_ALLOCATION)
#endif /* _UAPI_MODS_H_ */ #endif /* _UAPI_MODS_H_ */