nvtzvault: add driver for GP comm via oesp mailbox

- Allow applications to interact with TA via IOCTL
  interface, with one device node per TA/token to
  allow access control policies to be enforced.
- Validate the request parameters
- Add support to serialize request data from application
  and deserialize response from the TA
- Include process name as part of request to enable
  TA to log required info to nvlog buffer

Jira ESSS-1713

Change-Id: I9e4c4687ecb6e01b0d88130fd640a9b4a59676aa
Signed-off-by: Nagaraj P N <nagarajp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3282272
Reviewed-by: Sandeep Trasi <strasi@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: Leo Chiu <lchiu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: svc-percl-checker <svc-percl-checker@nvidia.com>
This commit is contained in:
Nagaraj P N
2025-01-11 18:32:43 +05:30
committed by Jon Hunter
parent a23d954e3e
commit 083a40b0d3
10 changed files with 1862 additions and 0 deletions

View File

@@ -115,6 +115,7 @@ kernel_module(
"drivers/nv-p2p/nvidia-p2p.ko",
"drivers/nvpmodel/nvpmodel-clk-cap.ko",
"drivers/nvpps/nvpps.ko",
"drivers/nvtzvault/nvtzvault.ko",
"drivers/nv-virtio/nv-virtio-console-poc.ko",
"drivers/pci/controller/pcie-tegra-vf.ko",
"drivers/pci/controller/private-soc/pcie-tegra264.ko",

View File

@@ -51,6 +51,7 @@ obj-m += misc/
obj-m += net/
obj-m += nvpps/
obj-m += nvpmodel/
obj-m += nvtzvault/
obj-m += nv-p2p/
ifdef CONFIG_PCI
obj-m += pci/

View File

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

# Compose nvtzvault.ko from its objects via a single kbuild list.
# Previously nvtzvault-main.o appeared in both nvtzvault-objs and
# nvtzvault-y, which makes kbuild link it twice.
nvtzvault-y := nvtzvault-main.o oesp-mailbox.o nvtzvault-helper.o
obj-m += nvtzvault.o

View File

@@ -0,0 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#ifndef NVTZVAULT_COMMON_H
#define NVTZVAULT_COMMON_H

/*
 * Driver-wide error-logging helper: prefixes every message with
 * "nvtzvault " through compile-time string concatenation, so callers
 * must pass a string-literal format (plus optional varargs), exactly
 * as with pr_err().
 */
#define NVTZVAULT_ERR(...) pr_err("nvtzvault " __VA_ARGS__)

#endif

View File

@@ -0,0 +1,482 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "nvtzvault-helper.h"
#include "nvtzvault-common.h"
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/slab.h>
/*
 * Parameter-type encodings carried in each 4-bit nibble of the
 * param_types word (decoded by nvtzvault_tee_get_param_type_from_index()).
 * Bit 0 = input, bit 1 = output, bit 2 = memref (vs. value).
 *
 * NOTE(review): VALUE_OUTPUT (3) equals VALUE_INOUT (3), and
 * MEMREF_OUTPUT (7) equals MEMREF_INOUT (7). GlobalPlatform TEE uses
 * 2 for VALUE_OUTPUT and 6 for MEMREF_OUTPUT; with the values below an
 * output-only parameter is indistinguishable from an inout one, so its
 * payload is serialized rather than skipped. Confirm this matches the
 * TA-side encoding before changing.
 */
#define NVTZVAULT_TEE_PARAM_TYPE_NONE 0U
#define NVTZVAULT_TEE_PARAM_TYPE_VALUE_INPUT 1U
#define NVTZVAULT_TEE_PARAM_TYPE_VALUE_OUTPUT 3U
#define NVTZVAULT_TEE_PARAM_TYPE_VALUE_INOUT 3U
#define NVTZVAULT_TEE_PARAM_TYPE_MEMREF_INPUT 5U
#define NVTZVAULT_TEE_PARAM_TYPE_MEMREF_OUTPUT 7U
#define NVTZVAULT_TEE_PARAM_TYPE_MEMREF_INOUT 7U
int nvtzvault_tee_translate_saerror_to_syserror(const enum nvtzvault_tzv_error tzv_error)
{
const enum nvtzvault_tzv_error tzv_error_array[] = {
TZVaultSuccess, TZVaultPending,
TZVaultErrorGeneric, TZVaultErrorAccessConflict,
TZVaultErrorAccessDenied, TZVaultErrorAgain,
TZVaultErrorBadFormat, TZVaultErrorBadParameters,
TZVaultErrorBadState, TZVaultErrorBusy,
TZVaultErrorCancel, TZVaultErrorCommunication,
TZVaultErrorExcessData, TZVaultErrorItemNotFound,
TZVaultErrorMacInvalid, TZVaultErrorNoData,
TZVaultErrorNoMessage, TZVaultErrorNoResource,
TZVaultErrorNotImplemented, TZVaultErrorNotSupported,
TZVaultErrorOutOfMemory, TZVaultErrorOverflow,
TZVaultErrorSecurity, TZVaultErrorShortBuffer,
TZVaultErrorSignatureInvalid, TZVaultErrorStorageNoSpace,
TZVaultErrorTargetDead, TZVaultErrorTimeNeedsReset,
TZVaultErrorTimeNotSet, TZVaultErrorTimeout };
const int sys_error_array[] = {
0, -EINPROGRESS,
-EUSERS, -EFAULT,
-EACCES, -EAGAIN,
-EBADF, -EINVAL,
-ENOTCONN, -EBUSY,
-ECANCELED, -ECONNRESET,
-E2BIG, -ENOENT,
-EBADMSG, -ENODATA,
-ENOMSG, -ENOSR,
-ENOSYS, -EOPNOTSUPP,
-ENOMEM, -EOVERFLOW,
-EPERM, -ENOBUFS,
-EBADE, -ENOSPC,
-EHOSTDOWN, -ERESTART,
-ETIME, -ETIMEDOUT };
// sizeof operator is used to calculate the number of elements in tzv_error_array
const uint64_t num_tzv_errors =
sizeof(tzv_error_array) / sizeof(const enum nvtzvault_tzv_error);
int sys_error = 0;
uint64_t index = 0ULL;
for (index = 0ULL; index < num_tzv_errors; ++index) {
if (tzv_error_array[index] == tzv_error)
break;
}
if (index < num_tzv_errors) {
sys_error = sys_error_array[index];
} else {
NVTZVAULT_ERR("Unknown SA error code: 0x%x\n", tzv_error);
sys_error = -EUSERS;
}
return sys_error;
}
/*
 * Extract the 4-bit parameter type stored in nibble @i of @t.
 * Returns 0 (PARAM_TYPE_NONE) when the nibble would lie beyond bit 31.
 */
static inline uint32_t nvtzvault_tee_get_param_type_from_index(const uint32_t t, const uint32_t i)
{
	const uint32_t shift = i << 2U;

	if (shift >= 32U)
		return 0U;
	return (t >> shift) & 0xFU;
}
/* Bit 2 of the 4-bit param type marks memory-reference (vs. value) params. */
static bool nvtzvault_tee_is_param_membuf(const uint32_t t)
{
	return (t & 4U) == 4U;
}
/* Bit 1 of the 4-bit param type marks output direction. */
static bool nvtzvault_tee_is_param_output(const uint32_t t)
{
	return (t & 2U) == 2U;
}
/* Bit 0 of the 4-bit param type marks input direction. */
static bool nvtzvault_tee_is_param_input(const uint32_t t)
{
	return (t & 1U) == 1U;
}
/*
 * True when @t is one of the recognized parameter-type encodings.
 * (A switch is not usable here: several of the macros share a value.)
 */
static bool nvtzvault_tee_is_param_valid(const uint32_t t)
{
	static const uint32_t allowed[] = {
		NVTZVAULT_TEE_PARAM_TYPE_NONE,
		NVTZVAULT_TEE_PARAM_TYPE_VALUE_INPUT,
		NVTZVAULT_TEE_PARAM_TYPE_VALUE_OUTPUT,
		NVTZVAULT_TEE_PARAM_TYPE_VALUE_INOUT,
		NVTZVAULT_TEE_PARAM_TYPE_MEMREF_INPUT,
		NVTZVAULT_TEE_PARAM_TYPE_MEMREF_OUTPUT,
		NVTZVAULT_TEE_PARAM_TYPE_MEMREF_INOUT
	};
	size_t idx;

	for (idx = 0; idx < sizeof(allowed) / sizeof(allowed[0]); ++idx) {
		if (allowed[idx] == t)
			return true;
	}
	return false;
}
/*
 * Validate every 4-bit type nibble of @param_types; logs and returns
 * false at the first unrecognized encoding.
 */
static bool nvtzvault_tee_are_all_params_valid(uint32_t param_types)
{
	uint32_t idx;

	for (idx = 0; idx < NVTZVAULT_TEE_PARAM_MAX_COUNT; idx++) {
		const uint32_t ptype =
			nvtzvault_tee_get_param_type_from_index(param_types, idx);

		if (!nvtzvault_tee_is_param_valid(ptype)) {
			NVTZVAULT_ERR("Invalid parameter type 0x%x at index %d\n", ptype, idx);
			return false;
		}
	}
	return true;
}
/*
 * Advance the buffer cursor by @size bytes without touching the contents
 * (used to reserve space for output-only parameters).
 * Return: 0 on success, -EOVERFLOW when @size does not fit.
 */
static int nvtzvault_tee_check_overflow_and_skip_bytes(struct nvtzvault_tee_buf_context *ctx,
		const uint32_t size)
{
	if (size == 0)
		return 0;
	if (size > ctx->buf_len) {
		NVTZVAULT_ERR("Invalid size\n");
		return -EOVERFLOW;
	}
	/* Subtraction form avoids wrapping offset + size. */
	if (ctx->current_offset > ctx->buf_len - size) {
		NVTZVAULT_ERR("Failed to write due to overflow\n");
		return -EOVERFLOW;
	}
	ctx->current_offset += size;
	return 0;
}
/*
 * Append @size bytes from @data to the serialization buffer.
 * @is_user_space selects copy_from_user() (untrusted pointer) over a plain
 * kernel memcpy(). Userspace data is staged through a temporary allocation
 * so a faulting copy never leaves a partial write in the shared buffer.
 *
 * Return: 0 on success, -EOVERFLOW if @size does not fit, -ENOMEM if the
 * staging allocation fails, -EFAULT if the userspace copy fails.
 */
int nvtzvault_tee_check_overflow_and_write(struct nvtzvault_tee_buf_context *ctx, void *data,
		const uint32_t size, bool is_user_space)
{
	int result = 0;
	void *local_buf = NULL;

	if (size == 0)
		goto end;
	if (size > ctx->buf_len) {
		NVTZVAULT_ERR("Invalid size\n");
		result = -EOVERFLOW;
		goto end;
	}
	/* Subtraction form avoids wrapping offset + size. */
	if (ctx->current_offset > ctx->buf_len - size) {
		NVTZVAULT_ERR("Failed to write due to overflow\n");
		result = -EOVERFLOW;
		goto end;
	}
	if (is_user_space) {
		/* Stage the user data; only allocate on the userspace path. */
		local_buf = kzalloc(size, GFP_KERNEL);
		if (!local_buf) {
			NVTZVAULT_ERR("Failed to allocate memory\n");
			result = -ENOMEM;
			goto end;
		}
		result = copy_from_user(local_buf, (void __user *)data, size);
		if (result != 0) {
			NVTZVAULT_ERR("%s(): Failed to copy_from_user %d\n", __func__, result);
			/* copy_from_user() returns bytes-not-copied, not an errno */
			result = -EFAULT;
			goto end;
		}
		memcpy(&ctx->buf_ptr[ctx->current_offset], local_buf, size);
	} else {
		memcpy(&ctx->buf_ptr[ctx->current_offset], data, size);
	}
	ctx->current_offset += size;
end:
	kfree(local_buf);
	return result;
}
/*
 * Copy @size bytes out of the serialization buffer into @data.
 * @is_user_space selects copy_to_user() (untrusted pointer) over a plain
 * kernel memcpy().
 *
 * Return: 0 on success, -EOVERFLOW if the read would run past the buffer,
 * -EFAULT if the userspace copy fails.
 */
int nvtzvault_tee_check_overflow_and_read(struct nvtzvault_tee_buf_context *ctx, void *data,
		const uint32_t size, bool is_user_space)
{
	int result = 0;

	if (size == 0)
		goto end;
	if (size > ctx->buf_len) {
		NVTZVAULT_ERR("Invalid size\n");
		result = -EOVERFLOW;
		goto end;
	}
	/* Subtraction form avoids wrapping offset + size. */
	if (ctx->current_offset > ctx->buf_len - size) {
		NVTZVAULT_ERR("Failed to read due to overflow\n");
		result = -EOVERFLOW;
		goto end;
	}
	if (is_user_space) {
		result = copy_to_user((void __user *)data, &ctx->buf_ptr[ctx->current_offset],
				size);
		if (result != 0) {
			NVTZVAULT_ERR("%s(): Failed to copy_to_user %d\n", __func__, result);
			/* copy_to_user() returns bytes-not-copied, not an errno */
			result = -EFAULT;
			goto end;
		}
	} else {
		memcpy(data, &ctx->buf_ptr[ctx->current_offset], size);
	}
	ctx->current_offset += size;
end:
	return result;
}
/*
 * Serialize a value-type parameter into the request buffer.
 * @skip: when true (non-input param) just reserve space for it.
 * Return: 0 on success, negative errno on overflow.
 */
static int nvtzvault_tee_write_value(struct nvtzvault_tee_buf_context *ctx,
		struct nvtzvault_teec_parameter *param, bool skip)
{
	int result;
	const uint32_t size = sizeof(param->value);

	if (skip)
		result = nvtzvault_tee_check_overflow_and_skip_bytes(ctx, size);
	else
		result = nvtzvault_tee_check_overflow_and_write(ctx, &param->value, size, false);
	if (result != 0)
		NVTZVAULT_ERR("%s failed %d\n", __func__, result);	/* add missing newline */
	return result;
}
/*
 * Deserialize a value-type parameter from the response buffer.
 * @skip: when true (non-output param) just step over its slot.
 * Return: 0 on success, negative errno on overflow.
 */
static int nvtzvault_tee_read_value(struct nvtzvault_tee_buf_context *ctx,
		struct nvtzvault_teec_parameter *param, bool skip)
{
	int result;
	const uint32_t size = sizeof(param->value);

	if (skip)
		result = nvtzvault_tee_check_overflow_and_skip_bytes(ctx, size);
	else
		result = nvtzvault_tee_check_overflow_and_read(ctx, &param->value, size, false);
	if (result != 0)
		NVTZVAULT_ERR("%s failed %d\n", __func__, result);	/* add missing newline */
	return result;
}
/*
 * Serialize a memref parameter: 32-bit payload length first, then the
 * payload itself (copied from the userspace buffer supplied via ioctl).
 * @skip: when true (non-input param) the payload slot is only reserved.
 * Return: 0 on success, negative errno on failure.
 */
static int nvtzvault_tee_write_memref(struct nvtzvault_tee_buf_context *ctx,
		struct nvtzvault_teec_parameter *param, bool skip)
{
	int result;
	uint32_t size;

	size = sizeof(param->memref.size);
	result = nvtzvault_tee_check_overflow_and_write(ctx, &param->memref.size, size, false);
	if (result != 0) {
		result = -EFAULT;
		NVTZVAULT_ERR("%s: failed to write memref size\n", __func__);
		goto end;
	}
	size = param->memref.size;
	if (skip)
		result = nvtzvault_tee_check_overflow_and_skip_bytes(ctx, size);
	else
		/* memref.buffer is a userspace pointer from the ioctl caller */
		result = nvtzvault_tee_check_overflow_and_write(ctx, param->memref.buffer,
				size, true);
	if (result != 0)
		NVTZVAULT_ERR("%s failed %d\n", __func__, result);	/* add missing newline */
end:
	return result;
}
/*
 * Deserialize a memref parameter: read the 32-bit payload length, then
 * copy the payload back to the caller's userspace buffer.
 * @skip: when true (non-output param) the payload slot is only stepped over.
 * Return: 0 on success, negative errno on failure.
 */
static int nvtzvault_tee_read_memref(struct nvtzvault_tee_buf_context *ctx,
		struct nvtzvault_teec_parameter *param, bool skip)
{
	int result;
	uint32_t size;

	size = sizeof(param->memref.size);
	result = nvtzvault_tee_check_overflow_and_read(ctx, &param->memref.size, size, false);
	if (result != 0) {
		result = -EFAULT;
		NVTZVAULT_ERR("%s: failed to read memref size\n", __func__);
		goto end;
	}
	/*
	 * NOTE(review): this size comes from the TA response and is only
	 * bounds-checked against the shared buffer, not against the capacity
	 * of the user-supplied memref.buffer -- confirm callers guarantee the
	 * user buffer is large enough for the response payload.
	 */
	size = param->memref.size;
	if (skip)
		result = nvtzvault_tee_check_overflow_and_skip_bytes(ctx, size);
	else
		result = nvtzvault_tee_check_overflow_and_read(ctx, param->memref.buffer,
				size, true);
	if (size == 0U)
		param->memref.buffer = NULL;
end:
	if (result != 0)
		NVTZVAULT_ERR("%s failed %d\n", __func__, result);	/* add missing newline */
	return result;
}
/*
 * Serialize a complete request: the param_types word, then the command id,
 * then each declared parameter in nibble order. Non-input parameters only
 * have space reserved for them. @params may be NULL when param_types is 0.
 * Return: 0 on success, negative errno on failure.
 */
int nvtzvault_tee_write_all_params(struct nvtzvault_tee_buf_context *ctx, uint32_t cmd_id,
		uint32_t param_types,
		struct nvtzvault_teec_parameter params[NVTZVAULT_TEE_PARAM_MAX_COUNT])
{
	uint32_t idx;
	int err;

	/* Reject the request up front if any nibble encodes an unknown type. */
	if (!nvtzvault_tee_are_all_params_valid(param_types)) {
		NVTZVAULT_ERR("%s: invalid param types %u\n", __func__, param_types);
		return -EINVAL;
	}
	err = nvtzvault_tee_check_overflow_and_write(ctx, &param_types, sizeof(param_types),
			false);
	if (err != 0) {
		NVTZVAULT_ERR("%s: failed to write param types %d\n", __func__, err);
		return -EFAULT;
	}
	err = nvtzvault_tee_check_overflow_and_write(ctx, &cmd_id, sizeof(cmd_id), false);
	if (err != 0) {
		NVTZVAULT_ERR("%s: failed to write cmd id %d\n", __func__, err);
		return -EFAULT;
	}
	for (idx = 0; idx < NVTZVAULT_TEE_PARAM_MAX_COUNT; idx++) {
		const uint32_t ptype =
			nvtzvault_tee_get_param_type_from_index(param_types, idx);
		bool reserve_only;

		if (ptype == NVTZVAULT_TEE_PARAM_TYPE_NONE)
			continue;
		reserve_only = !nvtzvault_tee_is_param_input(ptype);
		if (nvtzvault_tee_is_param_membuf(ptype))
			err = nvtzvault_tee_write_memref(ctx, &params[idx], reserve_only);
		else
			err = nvtzvault_tee_write_value(ctx, &params[idx], reserve_only);
		if (err != 0) {
			NVTZVAULT_ERR("Failed to write parameter %d: %d\n", idx, err);
			return err;
		}
	}
	return 0;
}
/*
 * Deserialize a complete response: the param_types word, then the command
 * id, then each declared parameter in nibble order. Non-output parameters
 * are stepped over without copying.
 * Return: 0 on success, negative errno on failure.
 */
int nvtzvault_tee_read_all_params(struct nvtzvault_tee_buf_context *ctx, uint32_t *p_param_types,
		uint32_t *p_cmd_id,
		struct nvtzvault_teec_parameter params[NVTZVAULT_TEE_PARAM_MAX_COUNT])
{
	uint32_t idx;
	int err;

	err = nvtzvault_tee_check_overflow_and_read(ctx, p_param_types,
			sizeof(*p_param_types), false);
	if (err != 0) {
		NVTZVAULT_ERR("%s: failed to read param types %d\n", __func__, err);
		return -EFAULT;
	}
	/* The types word came from the TA response; validate before use. */
	if (!nvtzvault_tee_are_all_params_valid(*p_param_types)) {
		NVTZVAULT_ERR("%s: invalid param types %u\n", __func__, *p_param_types);
		return -EINVAL;
	}
	err = nvtzvault_tee_check_overflow_and_read(ctx, p_cmd_id, sizeof(*p_cmd_id), false);
	if (err != 0) {
		NVTZVAULT_ERR("%s: failed to read cmd id %d\n", __func__, err);
		return -EFAULT;
	}
	for (idx = 0; idx < NVTZVAULT_TEE_PARAM_MAX_COUNT; idx++) {
		const uint32_t ptype =
			nvtzvault_tee_get_param_type_from_index(*p_param_types, idx);
		bool discard;

		if (ptype == NVTZVAULT_TEE_PARAM_TYPE_NONE)
			continue;
		discard = !nvtzvault_tee_is_param_output(ptype);
		if (nvtzvault_tee_is_param_membuf(ptype))
			err = nvtzvault_tee_read_memref(ctx, &params[idx], discard);
		else
			err = nvtzvault_tee_read_value(ctx, &params[idx], discard);
		if (err != 0) {
			NVTZVAULT_ERR("%s: failed to read param %u: %d\n", __func__, idx, err);
			return err;
		}
	}
	return 0;
}
int32_t nvtzvault_tee_buf_context_init(struct nvtzvault_tee_buf_context *ctx,
void *buf_ptr, uint32_t buf_len)
{
if (!ctx || !buf_ptr || (buf_len == 0)) {
NVTZVAULT_ERR("Invalid arguments\n");
return -EINVAL;
}
ctx->buf_ptr = buf_ptr;
ctx->buf_len = buf_len;
ctx->current_offset = 0;
return 0;
}
/* Rewind the cursor to the start of the buffer; contents are untouched. */
void nvtzvault_tee_buf_context_reset(struct nvtzvault_tee_buf_context *ctx)
{
	if (ctx == NULL) {
		NVTZVAULT_ERR("Invalid arguments\n");
		return;
	}
	ctx->current_offset = 0;
}

View File

@@ -0,0 +1,188 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#ifndef NVTZVAULT_HELPER_H
#define NVTZVAULT_HELPER_H
#include <linux/types.h>
#include <uapi/misc/nvtzvault-ioctl.h>
#define NVTZVAULT_TEE_PARAM_MAX_COUNT (8U)
/**
 * @brief Error codes returned by SA operations
 *
 * Enumeration of all possible error codes that can be returned
 * by SA operations, which are then translated to system error codes
 * by nvtzvault_tee_translate_saerror_to_syserror().
 */
enum nvtzvault_tzv_error {
	/** @brief Operation completed successfully */
	TZVaultSuccess = 0x0U,
	/** @brief Operation is in pending state */
	TZVaultPending = 0x1000U,
	/** @brief Generic error occurred */
	TZVaultErrorGeneric = 0x2000U,
	/** @brief Access conflict detected */
	TZVaultErrorAccessConflict = 0x2001U,
	/** @brief Access denied to requested resource */
	TZVaultErrorAccessDenied = 0x2002U,
	/** @brief Operation needs to be retried */
	TZVaultErrorAgain = 0x2003U,
	/** @brief Data format is invalid */
	TZVaultErrorBadFormat = 0x2004U,
	/** @brief Invalid parameters provided */
	TZVaultErrorBadParameters = 0x2005U,
	/** @brief System is in invalid state for operation */
	TZVaultErrorBadState = 0x2006U,
	/** @brief Resource is busy */
	TZVaultErrorBusy = 0x2007U,
	/** @brief Operation was cancelled */
	TZVaultErrorCancel = 0x2008U,
	/** @brief Communication error occurred */
	TZVaultErrorCommunication = 0x2009U,
	/** @brief Too much data provided */
	TZVaultErrorExcessData = 0x200AU,
	/** @brief Requested item not found */
	TZVaultErrorItemNotFound = 0x200BU,
	/** @brief MAC verification failed */
	TZVaultErrorMacInvalid = 0x200CU,
	/** @brief No data available */
	TZVaultErrorNoData = 0x200DU,
	/** @brief No message available */
	TZVaultErrorNoMessage = 0x200EU,
	/** @brief Resource not available */
	TZVaultErrorNoResource = 0x200FU,
	/** @brief Feature not implemented */
	TZVaultErrorNotImplemented = 0x2010U,
	/** @brief Operation not supported */
	TZVaultErrorNotSupported = 0x2011U,
	/** @brief Memory allocation failed */
	TZVaultErrorOutOfMemory = 0x2012U,
	/** @brief Buffer overflow occurred */
	TZVaultErrorOverflow = 0x2013U,
	/** @brief Security violation detected */
	TZVaultErrorSecurity = 0x2014U,
	/** @brief Provided buffer too small */
	TZVaultErrorShortBuffer = 0x2015U,
	/** @brief Signature verification failed */
	TZVaultErrorSignatureInvalid = 0x2016U,
	/** @brief No storage space available */
	TZVaultErrorStorageNoSpace = 0x2017U,
	/** @brief Target system is dead */
	TZVaultErrorTargetDead = 0x2018U,
	/** @brief System time needs to be reset */
	TZVaultErrorTimeNeedsReset = 0x2019U,
	/** @brief System time not set */
	TZVaultErrorTimeNotSet = 0x201AU,
	/** @brief Operation timed out */
	TZVaultErrorTimeout = 0x201BU,
};
/**
 * @brief Context structure for TEE buffer operations
 *
 * Contains information about the buffer used for communication between
 * kernel driver and SA, including buffer pointer, length and current offset.
 * The context only borrows the buffer: init/reset never allocate or free it.
 */
struct nvtzvault_tee_buf_context {
	/** @brief Pointer to the communication buffer (owned by the caller) */
	uint8_t *buf_ptr;
	/** @brief Total length of the buffer in bytes */
	uint32_t buf_len;
	/** @brief Current offset within the buffer for read/write operations */
	uint32_t current_offset;
};
/**
* @brief Translates SA error codes to system error codes
*
* @param[in] tzv_error The SA error code to translate
*
 * @return 0 on success (TZVaultSuccess), otherwise the negative errno
 *         value corresponding to the given SA error code
*/
int nvtzvault_tee_translate_saerror_to_syserror(const enum nvtzvault_tzv_error tzv_error);
/**
* @brief Writes data to the buffer context with overflow checking
*
* @param[in,out] ctx The buffer context to write to
* @param[in] data Pointer to the data to write
* @param[in] size Size of the data to write in bytes
* @param[in] is_user_space True if data pointer is from userspace, false if kernel space
*
* @return 0 on success, negative error code on failure:
* -EOVERFLOW if write would overflow buffer
* -ENOMEM if temporary buffer allocation fails
* -EFAULT if userspace copy fails
*/
int nvtzvault_tee_check_overflow_and_write(struct nvtzvault_tee_buf_context *ctx, void *data,
const uint32_t size, bool is_user_space);
/**
* @brief Reads data from the buffer context with overflow checking
*
* @param[in,out] ctx The buffer context to read from
* @param[out] data Pointer where read data should be stored
* @param[in] size Size of the data to read in bytes
* @param[in] is_user_space True if data pointer is from userspace, false if kernel space
*
* @return 0 on success, negative error code on failure:
* -EOVERFLOW if read would overflow buffer
* -EFAULT if userspace copy fails
*/
int nvtzvault_tee_check_overflow_and_read(struct nvtzvault_tee_buf_context *ctx, void *data,
const uint32_t size, bool is_user_space);
/**
* @brief Writes command parameters to the buffer context
*
* @param[in,out] ctx The buffer context to write to
* @param[in] cmd_id Command ID to write
* @param[in] param_types Parameter types bitmap
* @param[in] params Array of parameters to write
*
* @return 0 on success, negative error code on failure:
* -EINVAL if parameter types are invalid
* -EFAULT if parameter writing fails
* -EOVERFLOW if write would overflow buffer
*/
int nvtzvault_tee_write_all_params(struct nvtzvault_tee_buf_context *ctx, uint32_t cmd_id,
uint32_t param_types,
struct nvtzvault_teec_parameter params[NVTZVAULT_TEE_PARAM_MAX_COUNT]);
/**
* @brief Reads command parameters from the buffer context
*
* @param[in,out] ctx The buffer context to read from
* @param[out] p_param_types Pointer to store parameter types bitmap
* @param[out] p_cmd_id Pointer to store command ID
* @param[out] params Array to store read parameters
*
* @return 0 on success, negative error code on failure:
* -EINVAL if parameter types are invalid
* -EFAULT if parameter reading fails
* -EOVERFLOW if read would overflow buffer
*/
int nvtzvault_tee_read_all_params(struct nvtzvault_tee_buf_context *ctx, uint32_t *p_param_types,
uint32_t *p_cmd_id, struct nvtzvault_teec_parameter params[NVTZVAULT_TEE_PARAM_MAX_COUNT]);
/**
* @brief Initializes the buffer context for TEE operations
*
* @param[in,out] ctx The buffer context to initialize
* @param[in] buf_ptr Pointer to the buffer to use for communication
* @param[in] buf_len Length of the buffer in bytes
*
* @return 0 on success, negative error code on failure
*/
int32_t nvtzvault_tee_buf_context_init(struct nvtzvault_tee_buf_context *ctx,
void *buf_ptr, uint32_t buf_len);
/**
* @brief Resets the buffer context for TEE operations
*
* @param[in,out] ctx The buffer context to reset
*/
void nvtzvault_tee_buf_context_reset(struct nvtzvault_tee_buf_context *ctx);
#endif

View File

@@ -0,0 +1,781 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include <nvidia/conftest.h>

#include <linux/io.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <uapi/misc/nvtzvault-ioctl.h>

#include "nvtzvault-helper.h"
#include "oesp-mailbox.h"
#include "nvtzvault-common.h"
/* Limits and sizes for TA device nodes */
#define NVTZVAULT_MAX_TA_ID (99U)
#define NVTZVAULT_MAX_DEV_COUNT (40U)
#define NVTZVAULT_TA_UUID_LEN (16U)
#define NVTZVAULT_TA_DEVICE_NAME_LEN (17U)
/* Size of the shared request/response buffer exchanged with the TA */
#define NVTZVAULT_BUFFER_SIZE (8192U)
/* Max concurrent sessions tracked per open file (one bitmap bit each) */
#define NVTZVAULT_MAX_SESSIONS (32U)

/* Operation discriminator placed in every serialized request header */
enum nvtzvault_session_op_type {
	NVTZVAULT_SESSION_OP_OPEN,
	NVTZVAULT_SESSION_OP_INVOKE,
	NVTZVAULT_SESSION_OP_CLOSE
};

/* Fixed header prepended to every serialized request */
struct nvtzvault_session_req_hdr {
	enum nvtzvault_session_op_type op;
	uint32_t session_id;	/* 0xFFFFFFFF in open requests (no session yet) */
	uint32_t guest_id;
};

/* Fixed header at the start of every serialized response */
struct nvtzvault_session_resp_hdr {
	uint32_t result;	/* SA status (cast to enum nvtzvault_tzv_error) */
	uint32_t session_id;
};

/* Per-TA bookkeeping: one entry per registered device node */
struct nvtzvault_ta {
	struct miscdevice *dev;
	uint8_t uuid[NVTZVAULT_TA_UUID_LEN];
	uint32_t id;
	uint32_t task_opcode;	/* mailbox opcode used to reach this TA */
	uint32_t driver_id;
};

/* Driver-wide singleton state */
struct nvtzvault_dev {
	struct nvtzvault_ta ta[NVTZVAULT_MAX_DEV_COUNT];
	uint32_t ta_count;
	struct device *dev;
	struct mutex lock;	/* serializes ioctl traffic over data_buf */
	void *data_buf;		/* single shared request/response buffer */
} g_nvtzvault_dev;

/* Per-open-file context (allocated in open, freed in release) */
struct nvtzvault_ctx {
	bool is_session_open;
	uint32_t node_id;	/* index into g_nvtzvault_dev.ta[] */
	struct nvtzvault_tee_buf_context buf_ctx;
	/* one bit per session id opened through this fd */
	uint8_t session_bitmap[NVTZVAULT_MAX_SESSIONS / 8];
	uint32_t task_opcode;
	uint32_t driver_id;
};
static int nvtzvault_ta_dev_open(struct inode *inode, struct file *filp)
{
struct miscdevice *misc;
struct nvtzvault_ctx *ctx = NULL;
int32_t ret;
misc = filp->private_data;
ctx = kzalloc(sizeof(struct nvtzvault_ctx), GFP_KERNEL);
if (!ctx) {
NVTZVAULT_ERR("%s: Failed to allocate context memory\n", __func__);
return -ENOMEM;
}
ctx->node_id = misc->this_device->id;
ctx->task_opcode = g_nvtzvault_dev.ta[ctx->node_id].task_opcode;
ctx->driver_id = g_nvtzvault_dev.ta[ctx->node_id].driver_id;
ctx->is_session_open = false;
ret = nvtzvault_tee_buf_context_init(&ctx->buf_ctx, g_nvtzvault_dev.data_buf,
NVTZVAULT_BUFFER_SIZE);
if (ret != 0) {
NVTZVAULT_ERR("%s: Failed to initialize buffer context\n", __func__);
kfree(ctx);
return ret;
}
memset(ctx->session_bitmap, 0, sizeof(ctx->session_bitmap));
filp->private_data = ctx;
return 0;
}
/* release() handler: drop the context allocated by nvtzvault_ta_dev_open(). */
static int nvtzvault_ta_dev_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}
/* Test whether @session_id is marked open in the per-fd bitmap. */
static bool is_session_open(struct nvtzvault_ctx *ctx, uint32_t session_id)
{
	if (session_id >= NVTZVAULT_MAX_SESSIONS) {
		pr_err("%s: invalid session id %u\n", __func__, session_id);
		return false;
	}
	/* One bit per session, eight sessions per bitmap byte. */
	return (ctx->session_bitmap[session_id / 8U] &
			(1U << (session_id % 8U))) != 0;
}
/* Mark @session_id open in the per-fd bitmap (no-op when out of range). */
static void set_session_open(struct nvtzvault_ctx *ctx, uint32_t session_id)
{
	if (session_id >= NVTZVAULT_MAX_SESSIONS) {
		pr_err("%s: invalid session id %u\n", __func__, session_id);
		return;
	}
	ctx->session_bitmap[session_id / 8U] |= 1U << (session_id % 8U);
}
/* Clear @session_id from the per-fd bitmap (no-op when out of range). */
static void set_session_closed(struct nvtzvault_ctx *ctx, uint32_t session_id)
{
	if (session_id >= NVTZVAULT_MAX_SESSIONS) {
		pr_err("%s: invalid session id %u\n", __func__, session_id);
		return;
	}
	ctx->session_bitmap[session_id / 8U] &= ~(1U << (session_id % 8U));
}
/*
 * Append the calling process's name as a fixed 12-byte, zero-padded field
 * so the TA can attribute requests in its nvlog buffer. Names longer than
 * 12 bytes are truncated (the field is fixed-width, not NUL-terminated).
 * Return: 0 on success, negative errno on failure.
 */
static int nvtzvault_write_process_name(struct nvtzvault_tee_buf_context *ctx)
{
	char padded_name[12];
	size_t name_len;
	int ret;

	/* current->comm is at most 16 bytes (TASK_COMM_LEN) including NUL */
	name_len = strnlen(current->comm, 16);
	memset(padded_name, 0, sizeof(padded_name));
	/* sizeof(padded_name) instead of a magic 12 keeps the bound in sync */
	memcpy(padded_name, current->comm, min(name_len, sizeof(padded_name)));
	ret = nvtzvault_tee_check_overflow_and_write(ctx, padded_name,
			sizeof(padded_name), false);
	if (ret != 0)
		NVTZVAULT_ERR("%s: Failed to write process name: %d\n", __func__, ret);
	return ret;
}
/*
 * Open a session with the TA backing this device node: serialize the open
 * request (header, params, process name) into the shared buffer, exchange
 * it over the oesp mailbox, then deserialize the response and record the
 * new session id in the per-fd bitmap.
 * Return: 0 on success, negative errno on failure.
 */
static int nvtzvault_open_session(struct nvtzvault_ctx *ctx,
		struct nvtzvault_open_session_ctl *open_session_ctl)
{
	int ret = 0;
	/* session_id 0xFFFFFFFF marks "no session yet" in an open request */
	struct nvtzvault_session_req_hdr req_hdr = {NVTZVAULT_SESSION_OP_OPEN, 0xFFFFFFFFU, 0U};
	struct nvtzvault_session_resp_hdr resp_hdr;
	uint32_t cmd_id;

	nvtzvault_tee_buf_context_reset(&ctx->buf_ctx);
	// Write header
	ret = nvtzvault_tee_check_overflow_and_write(&ctx->buf_ctx, &req_hdr,
			sizeof(struct nvtzvault_session_req_hdr), false);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to write request header: %d\n", __func__, ret);
		return ret;
	}
	// Prepare request and serialize parameters (no command id for open)
	ret = nvtzvault_tee_write_all_params(&ctx->buf_ctx, 0xFFFFFFFF,
			open_session_ctl->operation.param_types,
			open_session_ctl->operation.params);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to write parameters: %d\n", __func__, ret);
		return ret;
	}
	// Write process name (12 chars, zero-padded if needed)
	ret = nvtzvault_write_process_name(&ctx->buf_ctx);
	if (ret != 0)
		return ret;
	// Trigger mailbox send, wait for response
	ret = oesp_mailbox_send_and_read(ctx->buf_ctx.buf_ptr, ctx->buf_ctx.current_offset,
			ctx->task_opcode, ctx->driver_id);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to read mailbox response: %d\n", __func__, ret);
		return ret;
	}
	/* The response is written in place over the request buffer. */
	nvtzvault_tee_buf_context_reset(&ctx->buf_ctx);
	// Read response header
	ret = nvtzvault_tee_check_overflow_and_read(&ctx->buf_ctx, &resp_hdr,
			sizeof(struct nvtzvault_session_resp_hdr), false);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to read response header: %d\n", __func__, ret);
		return ret;
	}
	ret = nvtzvault_tee_translate_saerror_to_syserror(
			(enum nvtzvault_tzv_error)resp_hdr.result);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: SA returned error: %d\n", __func__, resp_hdr.result);
		return ret;
	}
	// Validate session isn't already open
	if (is_session_open(ctx, resp_hdr.session_id)) {
		NVTZVAULT_ERR("%s: Session %u already open\n", __func__, resp_hdr.session_id);
		return -EINVAL;
	}
	/*
	 * Track the newly opened session.
	 * NOTE(review): if the TA returns session_id >= NVTZVAULT_MAX_SESSIONS,
	 * set_session_open() silently ignores it and later invoke/close on
	 * that id will fail -- confirm the TA's session-id range.
	 */
	set_session_open(ctx, resp_hdr.session_id);
	/* cmd_id is read only to advance the cursor; its value is unused. */
	ret = nvtzvault_tee_read_all_params(&ctx->buf_ctx, &cmd_id,
			&open_session_ctl->operation.param_types,
			open_session_ctl->operation.params);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to read parameters: %d\n", __func__, ret);
		return ret;
	}
	open_session_ctl->session_id = resp_hdr.session_id;
	return ret;
}
/*
 * Invoke a command on an already-open session: serialize the request
 * (header, command id + params, process name), exchange it over the oesp
 * mailbox, then deserialize the response parameters back to the caller.
 * Return: 0 on success, negative errno on failure.
 */
static int nvtzvault_invoke_cmd(struct nvtzvault_ctx *ctx,
		struct nvtzvault_invoke_cmd_ctl *invoke_cmd_ctl)
{
	int ret = 0;
	struct nvtzvault_session_req_hdr req_hdr = {NVTZVAULT_SESSION_OP_INVOKE,
			invoke_cmd_ctl->session_id, 0U};
	struct nvtzvault_session_resp_hdr resp_hdr;
	uint32_t cmd_id;

	nvtzvault_tee_buf_context_reset(&ctx->buf_ctx);
	// Validate session is open
	if (!is_session_open(ctx, invoke_cmd_ctl->session_id)) {
		NVTZVAULT_ERR("%s: Session %u not open\n", __func__, invoke_cmd_ctl->session_id);
		return -EINVAL;
	}
	// Write header
	ret = nvtzvault_tee_check_overflow_and_write(&ctx->buf_ctx, &req_hdr,
			sizeof(struct nvtzvault_session_req_hdr), false);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to write request header: %d\n", __func__, ret);
		return ret;
	}
	// Prepare request and serialize parameters
	ret = nvtzvault_tee_write_all_params(&ctx->buf_ctx, invoke_cmd_ctl->command_id,
			invoke_cmd_ctl->operation.param_types, invoke_cmd_ctl->operation.params);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to write parameters: %d\n", __func__, ret);
		return ret;
	}
	// Write process name (12 chars, zero-padded if needed)
	ret = nvtzvault_write_process_name(&ctx->buf_ctx);
	if (ret != 0)
		return ret;
	// Trigger mailbox send, wait for response
	ret = oesp_mailbox_send_and_read(ctx->buf_ctx.buf_ptr, ctx->buf_ctx.current_offset,
			ctx->task_opcode, ctx->driver_id);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to read mailbox response: %d\n", __func__, ret);
		return ret;
	}
	/* The response is written in place over the request buffer. */
	nvtzvault_tee_buf_context_reset(&ctx->buf_ctx);
	// Read response header
	ret = nvtzvault_tee_check_overflow_and_read(&ctx->buf_ctx, &resp_hdr,
			sizeof(struct nvtzvault_session_resp_hdr), false);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to read response header: %d\n", __func__, ret);
		return ret;
	}
	ret = nvtzvault_tee_translate_saerror_to_syserror(
			(enum nvtzvault_tzv_error)resp_hdr.result);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: SA returned error: %d\n", __func__, resp_hdr.result);
		return ret;
	}
	/* cmd_id is read only to advance the cursor; its value is unused. */
	ret = nvtzvault_tee_read_all_params(&ctx->buf_ctx, &cmd_id,
			&invoke_cmd_ctl->operation.param_types, invoke_cmd_ctl->operation.params);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to read parameters: %d\n", __func__, ret);
		return ret;
	}
	return ret;
}
/*
 * Close an open session: serialize a close request (header, empty param
 * list, process name), exchange it over the oesp mailbox, and clear the
 * session from the per-fd bitmap only if the TA reports success.
 * Return: 0 on success, negative errno on failure.
 */
static int nvtzvault_close_session(struct nvtzvault_ctx *ctx,
		struct nvtzvault_close_session_ctl *close_session_ctl)
{
	int ret = 0;
	struct nvtzvault_session_req_hdr req_hdr = {NVTZVAULT_SESSION_OP_CLOSE,
			close_session_ctl->session_id, 0U};
	struct nvtzvault_session_resp_hdr resp_hdr;

	nvtzvault_tee_buf_context_reset(&ctx->buf_ctx);
	// Validate session is open
	if (!is_session_open(ctx, close_session_ctl->session_id)) {
		NVTZVAULT_ERR("%s: Session %u not open\n", __func__, close_session_ctl->session_id);
		return -EINVAL;
	}
	// Write header
	ret = nvtzvault_tee_check_overflow_and_write(&ctx->buf_ctx, &req_hdr,
			sizeof(struct nvtzvault_session_req_hdr), false);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to write request header: %d\n", __func__, ret);
		return ret;
	}
	// Prepare request and serialize parameters with no params
	/* param_types 0 means the NULL params array is never dereferenced */
	ret = nvtzvault_tee_write_all_params(&ctx->buf_ctx, 0xFFFFFFFF, 0U, NULL);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to write parameters: %d\n", __func__, ret);
		return ret;
	}
	// Write process name (12 chars, zero-padded if needed)
	ret = nvtzvault_write_process_name(&ctx->buf_ctx);
	if (ret != 0)
		return ret;
	// Trigger mailbox send, wait for response
	ret = oesp_mailbox_send_and_read(ctx->buf_ctx.buf_ptr, ctx->buf_ctx.current_offset,
			ctx->task_opcode, ctx->driver_id);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to read mailbox response: %d\n", __func__, ret);
		return ret;
	}
	/* The response is written in place over the request buffer. */
	nvtzvault_tee_buf_context_reset(&ctx->buf_ctx);
	// Read response header
	ret = nvtzvault_tee_check_overflow_and_read(&ctx->buf_ctx, &resp_hdr,
			sizeof(struct nvtzvault_session_resp_hdr), false);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: Failed to read response header: %d\n", __func__, ret);
		return ret;
	}
	ret = nvtzvault_tee_translate_saerror_to_syserror(
			(enum nvtzvault_tzv_error)resp_hdr.result);
	if (ret == 0) {
		// Only clear session if close was successful
		set_session_closed(ctx, close_session_ctl->session_id);
	} else {
		NVTZVAULT_ERR("%s: SA returned error: %d\n", __func__, ret);
	}
	return ret;
}
static long nvtzvault_ta_dev_ioctl(struct file *filp, unsigned int ioctl_num, unsigned long arg)
{
struct nvtzvault_ctx *ctx = filp->private_data;
struct nvtzvault_open_session_ctl *open_session_ctl;
struct nvtzvault_invoke_cmd_ctl *invoke_cmd_ctl;
struct nvtzvault_close_session_ctl *close_session_ctl;
int ret = 0;
if (!ctx) {
NVTZVAULT_ERR("%s(): ctx not allocated\n", __func__);
return -EPERM;
}
mutex_lock(&g_nvtzvault_dev.lock);
switch (ioctl_num) {
case NVTZVAULT_IOCTL_OPEN_SESSION:
open_session_ctl = kzalloc(sizeof(*open_session_ctl), GFP_KERNEL);
if (!open_session_ctl) {
NVTZVAULT_ERR("%s(): failed to allocate memory\n", __func__);
ret = -ENOMEM;
goto release_lock;
}
ret = copy_from_user(open_session_ctl, (void __user *)arg,
sizeof(*open_session_ctl));
if (ret) {
NVTZVAULT_ERR("%s(): Failed to copy_from_user open_session_ctl:%d\n",
__func__, ret);
kfree(open_session_ctl);
goto release_lock;
}
ret = nvtzvault_open_session(ctx, open_session_ctl);
if (ret) {
NVTZVAULT_ERR("%s(): nvtzvault_open_session failed:%d\n", __func__, ret);
kfree(open_session_ctl);
goto release_lock;
}
ret = copy_to_user((void __user *)arg, open_session_ctl,
sizeof(*open_session_ctl));
if (ret) {
NVTZVAULT_ERR("%s(): Failed to copy_from_user open_session_ctl:%d\n",
__func__, ret);
kfree(open_session_ctl);
goto release_lock;
}
kfree(open_session_ctl);
break;
case NVTZVAULT_IOCTL_INVOKE_CMD:
invoke_cmd_ctl = kzalloc(sizeof(*invoke_cmd_ctl), GFP_KERNEL);
if (!invoke_cmd_ctl) {
NVTZVAULT_ERR("%s(): failed to allocate memory\n", __func__);
ret = -ENOMEM;
goto release_lock;
}
ret = copy_from_user(invoke_cmd_ctl, (void __user *)arg, sizeof(*invoke_cmd_ctl));
if (ret) {
NVTZVAULT_ERR("%s(): Failed to copy_from_user invoke_cmd_ctl:%d\n",
__func__, ret);
kfree(invoke_cmd_ctl);
goto release_lock;
}
ret = nvtzvault_invoke_cmd(ctx, invoke_cmd_ctl);
if (ret) {
NVTZVAULT_ERR("%s(): nvtzvault_invoke_cmd failed:%d\n", __func__, ret);
kfree(invoke_cmd_ctl);
goto release_lock;
}
ret = copy_to_user((void __user *)arg, invoke_cmd_ctl, sizeof(*invoke_cmd_ctl));
if (ret) {
NVTZVAULT_ERR("%s(): Failed to copy_from_user invoke_cmd_ctl:%d\n",
__func__, ret);
kfree(invoke_cmd_ctl);
goto release_lock;
}
kfree(invoke_cmd_ctl);
break;
case NVTZVAULT_IOCTL_CLOSE_SESSION:
close_session_ctl = kzalloc(sizeof(*close_session_ctl), GFP_KERNEL);
if (!close_session_ctl) {
NVTZVAULT_ERR("%s(): failed to allocate memory\n", __func__);
ret = -ENOMEM;
goto release_lock;
}
ret = copy_from_user(close_session_ctl, (void __user *)arg,
sizeof(*close_session_ctl));
if (ret) {
NVTZVAULT_ERR("%s(): Failed to copy_from_user close_session_ctl:%d\n",
__func__, ret);
kfree(close_session_ctl);
goto release_lock;
}
ret = nvtzvault_close_session(ctx, close_session_ctl);
if (ret) {
NVTZVAULT_ERR("%s(): nvtzvault_close_session failed:%d\n", __func__, ret);
kfree(close_session_ctl);
goto release_lock;
}
ret = copy_to_user((void __user *)arg, close_session_ctl,
sizeof(*close_session_ctl));
if (ret) {
NVTZVAULT_ERR("%s(): Failed to copy_from_user close_session_ctl:%d\n",
__func__, ret);
kfree(close_session_ctl);
goto release_lock;
}
kfree(close_session_ctl);
break;
default:
NVTZVAULT_ERR("%s(): Unsupported IOCTL command", __func__);
ret = -EINVAL;
break;
}
release_lock:
mutex_unlock(&g_nvtzvault_dev.lock);
return ret;
}
/* File operations for the per-TA misc device nodes (/dev/nvtzvault-ta-NN);
 * all request handling goes through the unlocked_ioctl entry point. */
static const struct file_operations nvtzvault_ta_fops = {
	.owner = THIS_MODULE,
	.open = nvtzvault_ta_dev_open,
	.release = nvtzvault_ta_dev_release,
	.unlocked_ioctl = nvtzvault_ta_dev_ioctl,
};
/*
 * Check that a TA id/uuid pair parsed from the device tree is usable:
 * the id must be in range, and neither the id nor the uuid may collide
 * with a TA that has already been registered in g_nvtzvault_dev.
 *
 * Returns 0 when the parameters are acceptable, -EINVAL otherwise.
 */
static int nvtzvault_validate_ta_params(uint32_t const ta_id, uint8_t const * const ta_uuid,
					uint32_t const ta_count)
{
	const struct nvtzvault_ta *known;
	uint32_t idx;

	if (ta_id >= ta_count) {
		NVTZVAULT_ERR("%s: invalid ta id %u\n", __func__, ta_id);
		return -EINVAL;
	}
	if (ta_id > NVTZVAULT_MAX_TA_ID) {
		NVTZVAULT_ERR("%s: unsupported ta id %u\n", __func__, ta_id);
		return -EINVAL;
	}
	/* Scan every TA registered so far for an id or uuid collision */
	for (idx = 0U; idx < g_nvtzvault_dev.ta_count; idx++) {
		known = &g_nvtzvault_dev.ta[idx];
		if (known->id == ta_id) {
			NVTZVAULT_ERR("%s: ta id %u is already used\n", __func__, ta_id);
			return -EINVAL;
		}
		if (memcmp(known->uuid, ta_uuid, NVTZVAULT_TA_UUID_LEN) == 0) {
			NVTZVAULT_ERR("%s: uuid for ta id %u is already used for ta id %u\n",
				      __func__, ta_id, idx);
			return -EINVAL;
		}
	}
	return 0;
}
/**
 * nvtzvault_ta_create_dev_node() - register a misc device node for one TA
 * @dev: caller-allocated miscdevice to fill in and register
 * @id:  TA id, rendered as a two-digit suffix ("nvtzvault-ta-NN")
 *
 * The node name is heap-allocated and owned by @dev->name after success;
 * it is freed here on every failure path (and @dev->name is cleared so no
 * dangling pointer is left behind).
 *
 * Return: 0 on success, -ENOMEM on allocation/truncation failure, or the
 * error returned by misc_register().
 */
static int nvtzvault_ta_create_dev_node(struct miscdevice *dev, uint32_t id)
{
	char *node_name;
	int32_t ret;

	node_name = kzalloc(NVTZVAULT_TA_DEVICE_NAME_LEN, GFP_KERNEL);
	if (node_name == NULL)
		return -ENOMEM;
	/* Build "nvtzvault-ta-NN"; snprintf always NUL-terminates and reports
	 * truncation, replacing the hand-rolled digit indexing (which only
	 * handled two-digit ids correctly). */
	ret = snprintf(node_name, NVTZVAULT_TA_DEVICE_NAME_LEN, "nvtzvault-ta-%02u", id);
	if (ret < 0 || ret >= NVTZVAULT_TA_DEVICE_NAME_LEN) {
		NVTZVAULT_ERR("%s: device name length exceeds supported size", __func__);
		kfree(node_name);
		return -ENOMEM;
	}
	dev->minor = MISC_DYNAMIC_MINOR;
	dev->fops = &nvtzvault_ta_fops;
	dev->name = node_name;
	ret = misc_register(dev);
	if (ret != 0) {
		NVTZVAULT_ERR("%s: misc dev %u registration failed err %d\n", __func__, id, ret);
		kfree(node_name);
		dev->name = NULL;
		/* Propagate the real error instead of masking it as -ENOMEM */
		return ret;
	}
	dev->this_device->id = id;
	return 0;
}
/**
 * nvtzvault_probe() - parse the DT, set up the mailbox and register TA nodes
 * @pdev: platform device bound to the "nvidia,nvtzvault" node
 *
 * Reads the 'ta-mapping' property (pairs of <phandle ta-id> u32 cells),
 * resolves each TA node's uuid/op-code/driver-id, and creates one misc
 * device node per TA. Shared state (lock, data buffer) is initialized
 * before any device node becomes visible to userspace, since the ioctl
 * path uses both immediately.
 *
 * Return: 0 on success, negative errno on failure (all partially created
 * device nodes and allocations are undone).
 */
static int nvtzvault_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *nvtzvault_node = dev->of_node;
	struct device_node *ta_node;
	struct miscdevice *misc;
	struct nvtzvault_ta *ta;
	u32 ta_count;
	u32 offset = 0U;
	phandle ta_phandle;
	u32 ta_id;
	u8 ta_uuid[NVTZVAULT_TA_UUID_LEN];
	uint32_t task_opcode;
	uint32_t driver_id;
	int len, i, j;
	int ret;

	if (!nvtzvault_node)
		return -ENODEV;
	ret = oesp_mailbox_init(pdev);
	if (ret) {
		dev_err(dev, "failed to initialize mailbox\n");
		return ret;
	}
	// Read the 'ta-mapping' property from supported-tas
	if (!of_get_property(nvtzvault_node, "ta-mapping", &len)) {
		dev_err(dev, "ta-mapping property missing\n");
		return -EINVAL;
	}
	/* 'len' is in bytes and each entry is a <phandle ta-id> pair of u32
	 * cells, so it must be a multiple of 2 * sizeof(u32) (the previous
	 * 'len % 2' test accepted any even byte count). */
	if ((len % (2U * sizeof(u32))) != 0U) {
		dev_err(dev, "ta-mapping property has invalid format\n");
		return -EINVAL;
	}
	ta_count = (len / (sizeof(u32) * 2U));
	if (ta_count >= NVTZVAULT_MAX_DEV_COUNT) {
		dev_err(dev, "ta count exceeds max supported value\n");
		return -EINVAL;
	}
	/* Initialize shared state before registering any device node: the
	 * ioctl handler takes the lock and uses the data buffer as soon as
	 * a node exists. */
	g_nvtzvault_dev.data_buf = kzalloc(NVTZVAULT_BUFFER_SIZE, GFP_KERNEL);
	if (!g_nvtzvault_dev.data_buf) {
		dev_err(dev, "failed to allocate data buffer\n");
		return -ENOMEM;
	}
	mutex_init(&g_nvtzvault_dev.lock);
	// Iterate over the entries in the 'supported-tas/config' array
	for (i = 0; i < ta_count; i++) {
		ta = &g_nvtzvault_dev.ta[i];
		offset = i * 2U;
		ta_node = NULL;
		misc = kzalloc(sizeof(struct miscdevice), GFP_KERNEL);
		if (misc == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
		ta->dev = misc;
		// Read the TA phandle
		if (of_property_read_u32_index(nvtzvault_node, "ta-mapping", offset,
					       &ta_phandle)) {
			dev_err(dev, "failed to get ta phandle\n");
			ret = -EINVAL;
			goto fail;
		}
		ta_node = of_find_node_by_phandle(ta_phandle);
		if (!ta_node) {
			dev_err(dev, "ta node not found\n");
			ret = -EINVAL;
			goto fail;
		}
		// Read the TA ID
		if (of_property_read_u32_index(nvtzvault_node, "ta-mapping", (offset + 1U),
					       &ta_id)) {
			dev_err(dev, "failed to get ta id\n");
			ret = -EINVAL;
			goto fail;
		}
		if (of_property_read_u8_array(ta_node, "uuid", ta_uuid, NVTZVAULT_TA_UUID_LEN)) {
			dev_err(dev, "failed to get ta uuid\n");
			ret = -EINVAL;
			goto fail;
		}
		if (of_property_read_u32(ta_node, "op-code", &task_opcode)) {
			dev_err(dev, "failed to get ta task-opcode\n");
			ret = -EINVAL;
			goto fail;
		}
		if (of_property_read_u32(ta_node, "driver-id", &driver_id)) {
			dev_err(dev, "failed to get ta driver-id\n");
			ret = -EINVAL;
			goto fail;
		}
		/* Done with the TA node: drop the reference taken by
		 * of_find_node_by_phandle(). */
		of_node_put(ta_node);
		ta_node = NULL;
		ret = nvtzvault_validate_ta_params(ta_id, ta_uuid, ta_count);
		if (ret != 0)
			goto fail;
		ta->id = ta_id;
		(void)memcpy(ta->uuid, ta_uuid, NVTZVAULT_TA_UUID_LEN);
		ta->task_opcode = task_opcode;
		ta->driver_id = driver_id;
		dev_info(dev, "TA ID: %u, UUID: %16ph\n", ta_id, ta_uuid);
		ret = nvtzvault_ta_create_dev_node(ta->dev, ta->id);
		if (ret != 0)
			goto fail;
		g_nvtzvault_dev.ta_count++;
	}
	return 0;
fail:
	of_node_put(ta_node);	/* no-op when NULL */
	/* The failing entry was never counted in ta_count and its miscdevice
	 * was never (successfully) registered; free the struct itself so it
	 * does not leak. Its name, if any, was already freed by
	 * nvtzvault_ta_create_dev_node() on its own failure path. */
	if (ta->dev) {
		kfree(ta->dev);
		ta->dev = NULL;
	}
	for (j = 0; j < g_nvtzvault_dev.ta_count; j++) {
		ta = &g_nvtzvault_dev.ta[j];
		if (ta->dev) {
			misc_deregister(ta->dev);
			kfree(ta->dev->name);
			kfree(ta->dev);
			ta->dev = NULL;
		}
	}
	g_nvtzvault_dev.ta_count = 0;
	kfree(g_nvtzvault_dev.data_buf);
	g_nvtzvault_dev.data_buf = NULL;
	return ret;
}
static int nvtzvault_remove(struct platform_device *pdev)
{
struct nvtzvault_ta *ta;
uint32_t i;
for (i = 0; i < g_nvtzvault_dev.ta_count; i++) {
ta = &g_nvtzvault_dev.ta[i];
if (ta->dev) {
misc_deregister(ta->dev);
kfree(ta->dev);
}
}
kfree(g_nvtzvault_dev.data_buf);
return 0;
}
/*
 * platform_driver.remove changed its return type from int to void in
 * Linux v6.11; provide a wrapper with whichever signature the running
 * kernel expects so nvtzvault_remove() itself stays version-agnostic.
 */
#if defined(NV_PLATFORM_DRIVER_STRUCT_REMOVE_RETURNS_VOID) /* Linux v6.11 */
static void nvtzvault_remove_wrapper(struct platform_device *pdev)
{
	nvtzvault_remove(pdev);
}
#else
static int nvtzvault_remove_wrapper(struct platform_device *pdev)
{
	return nvtzvault_remove(pdev);
}
#endif
/* Device-tree match table: binds this driver to "nvidia,nvtzvault" nodes */
static const struct of_device_id nvtzvault_match[] = {
	{.compatible = "nvidia,nvtzvault"},
	{}
};
MODULE_DEVICE_TABLE(of, nvtzvault_match);
/*
 * Platform driver glue.
 * NOTE(review): setting .driver.owner explicitly is redundant — the
 * platform core fills it in via the module_platform_driver() registration
 * path; harmless, but could be dropped.
 */
static struct platform_driver nvtzvault_driver = {
	.probe = nvtzvault_probe,
	.remove = nvtzvault_remove_wrapper,
	.driver = {
		.owner = THIS_MODULE,
		.name = "nvtzvault",
		.of_match_table = of_match_ptr(nvtzvault_match),
	}
};
module_platform_driver(nvtzvault_driver);
MODULE_DESCRIPTION("NVIDIA TZVault driver");
MODULE_AUTHOR("Nvidia Corporation");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,270 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "oesp-mailbox.h"
#include "nvtzvault-common.h"
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/memory.h>
/* Mailbox Request register offsets */
/* Request parameters written by the driver before ringing the doorbell */
#define REQ_OPCODE_OFFSET 0x800U
#define REQ_FORMAT_FLAG_OFFSET 0x804U
#define DRIVER_ID_OFFSET 0x808U
#define IOVA_LOW_OFFSET 0x80CU
#define IOVA_HIGH_OFFSET 0x810U
#define MSG_SIZE_OFFSET 0x814U
/* Mailbox Response register offsets */
/* Read back after the completion fires */
#define RESP_OPCODE_OFFSET 0x1000U
#define RESP_FORMAT_FLAG_OFFSET 0x1004U
#define RESP_STATUS_OFFSET 0x1008U
/* Mailbox register offset */
#define EXT_CTRL_OFFSET 0x4U
#define PSC_CTRL_REG_OFFSET 0x8U
/* Mailbox register bitfields */
/* EXT_CTRL: request posted / response acknowledged; interrupt enable */
#define MBOX_IN_VALID 0x1U
#define LIC_INTR_EN 0x100U
#define MBOX_OUT_DONE 0x10U
/* PSC_CTRL: response pending (checked by the IRQ handler) */
#define MBOX_OUT_VALID 0x1U
#define MAX_MAILBOX 8U
#define MBOX_TIMEOUT_MS (300U * 1000U) /* Set to 5 minutes, required for Secure Storage */
/* Signalled by tegra_hpse_irq_handler() when a mailbox response arrives */
static DECLARE_COMPLETION(mbox_completion);
/* Mapped carveout and register windows established by oesp_mailbox_init().
 * NOTE(review): g_mbox_ctx has external linkage; consider making it static
 * unless another translation unit references it — confirm. */
struct mbox_ctx {
	void *hpse_carveout_base_va;
	uint64_t hpse_carveout_base_iova;
	uint64_t hpse_carveout_size;
	void *oesp_mbox_reg_base_va;
	uint64_t oesp_mbox_reg_size;
} g_mbox_ctx;
/* Shadow of the request register values, one field per REQ_* register */
struct mbox_req {
	u32 task_opcode;
	u32 format_flag;
	u32 tos_driver_id;
	u32 hpse_carveout_iova_lsb;
	u32 hpse_carveout_iova_msb;
	u32 msg_size;
};
/* Shadow of the response register values, one field per RESP_* register */
struct mbox_resp {
	u32 task_opcode;
	u32 format_flag;
	u32 status;
};
/**
 * oesp_mailbox_send_and_read() - send a request to the SA and read the reply
 * @buf_ptr:     serialized request; overwritten with the response on success
 * @buf_len:     length in bytes of the request data
 * @task_opcode: task opcode written to the request register
 * @driver_id:   TOS driver id written to the request register
 *
 * Stages the request into the shared HPSE carveout, programs the request
 * registers, rings the doorbell, and blocks until the IRQ handler signals
 * a response (or MBOX_TIMEOUT_MS elapses). The response is always
 * acknowledged via MBOX_OUT_DONE, even on error.
 *
 * Return: 0 on success, -EINVAL if the request exceeds the carveout or the
 * SA reported a non-zero status, -ETIMEDOUT if no response arrived in time.
 */
int32_t oesp_mailbox_send_and_read(void *buf_ptr, uint32_t buf_len, uint32_t task_opcode,
				   uint32_t driver_id)
{
	struct mbox_req req;
	struct mbox_resp resp;
	u8 *oesp_reg_mem_ptr = g_mbox_ctx.oesp_mbox_reg_base_va;
	u32 reg_val;
	int32_t ret = 0;
	unsigned long timeout;

	/* Bound the copy: previously an oversized request would overflow the
	 * carveout mapping. */
	if (buf_len > g_mbox_ctx.hpse_carveout_size) {
		NVTZVAULT_ERR("%s: request size %u exceeds carveout size\n",
			      __func__, buf_len);
		return -EINVAL;
	}
	reinit_completion(&mbox_completion);
	/* Send request */
	req.task_opcode = task_opcode;
	writel(req.task_opcode, oesp_reg_mem_ptr + REQ_OPCODE_OFFSET);
	/* Format word: version 1 in the low byte, 'P','S' signature on top */
	req.format_flag = 0x1 + (0x0 << 8) + ('P' << 16) + ('S' << 24);
	writel(req.format_flag, oesp_reg_mem_ptr + REQ_FORMAT_FLAG_OFFSET);
	req.tos_driver_id = driver_id;
	writel(req.tos_driver_id, oesp_reg_mem_ptr + DRIVER_ID_OFFSET);
	/* Stage the request payload into the shared carveout */
	memcpy(g_mbox_ctx.hpse_carveout_base_va, buf_ptr, buf_len);
	req.hpse_carveout_iova_lsb = g_mbox_ctx.hpse_carveout_base_iova & 0xFFFFFFFFU;
	writel(req.hpse_carveout_iova_lsb, oesp_reg_mem_ptr + IOVA_LOW_OFFSET);
	req.hpse_carveout_iova_msb = g_mbox_ctx.hpse_carveout_base_iova >> 32U;
	writel(req.hpse_carveout_iova_msb, oesp_reg_mem_ptr + IOVA_HIGH_OFFSET);
	req.msg_size = buf_len;
	writel(req.msg_size, oesp_reg_mem_ptr + MSG_SIZE_OFFSET);
	/* Make the payload and parameter writes visible BEFORE ringing the
	 * doorbell (the barrier previously came after the doorbell write,
	 * where it ordered nothing useful). */
	wmb();
	reg_val = readl(oesp_reg_mem_ptr + EXT_CTRL_OFFSET);
	writel((reg_val | MBOX_IN_VALID | LIC_INTR_EN), oesp_reg_mem_ptr + EXT_CTRL_OFFSET);
	/* Wait for response */
	timeout = wait_for_completion_timeout(&mbox_completion,
					      msecs_to_jiffies(MBOX_TIMEOUT_MS));
	if (timeout == 0) {
		reg_val = readl(oesp_reg_mem_ptr + PSC_CTRL_REG_OFFSET);
		if ((reg_val & MBOX_OUT_VALID) == 0x1U)
			NVTZVAULT_ERR("%s: MBOX_OUT_VALID is set\n", __func__);
		NVTZVAULT_ERR("%s: Timeout waiting for response\n", __func__);
		ret = -ETIMEDOUT;
		goto end;
	}
	/* Read response registers */
	resp.task_opcode = readl(oesp_reg_mem_ptr + RESP_OPCODE_OFFSET);
	resp.format_flag = readl(oesp_reg_mem_ptr + RESP_FORMAT_FLAG_OFFSET);
	resp.status = readl(oesp_reg_mem_ptr + RESP_STATUS_OFFSET);
	if (resp.status != 0x0U) {
		NVTZVAULT_ERR("%s resp.status %u\n", __func__, resp.status);
		ret = -EINVAL;
		goto end;
	}
	/* Copy response data from carveout back to input buffer */
	memcpy(buf_ptr, g_mbox_ctx.hpse_carveout_base_va, buf_len);
end:
	/* Set MBOX_OUT_DONE to acknowledge response */
	reg_val = readl(oesp_reg_mem_ptr + EXT_CTRL_OFFSET);
	reg_val |= MBOX_OUT_DONE;
	writel(reg_val, oesp_reg_mem_ptr + EXT_CTRL_OFFSET);
	return ret;
}
/**
 * tegra_hpse_irq_handler() - primary IRQ handler for the OESP mailbox
 * @irq:    interrupt number
 * @dev_id: cookie registered with devm_request_irq() (unused here)
 *
 * Checks MBOX_OUT_VALID in the PSC control register to confirm that the
 * interrupt signals a pending mailbox response, then wakes the thread
 * blocked in oesp_mailbox_send_and_read(). Reading and acknowledging the
 * response (MBOX_OUT_DONE) is done by that thread, not here.
 *
 * Return: IRQ_HANDLED when a response was signalled, IRQ_NONE otherwise.
 */
static irqreturn_t tegra_hpse_irq_handler(int irq, void *dev_id)
{
	u8 *mbox_regs = g_mbox_ctx.oesp_mbox_reg_base_va;
	u32 status;

	status = readl(mbox_regs + PSC_CTRL_REG_OFFSET);
	if (status & MBOX_OUT_VALID) {
		/* Wake the requester waiting on the mailbox response */
		complete(&mbox_completion);
		return IRQ_HANDLED;
	}
	/* Not our interrupt: no response pending */
	return IRQ_NONE;
}
int32_t oesp_mailbox_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *hpse_node = dev->of_node;
struct device_node *hpse_carveout_node;
struct device_node *oesp_mbox_node;
u64 hpse_carveout_base_iova;
void *hpse_carveout_base_va;
u64 hpse_carveout_size;
u64 oesp_mbox_reg_base_iova;
void *oesp_mbox_reg_base_va;
u64 oesp_mbox_reg_size;
int irq;
int ret;
if (!hpse_node)
return -ENODEV;
hpse_carveout_node = of_find_node_by_path("/reserved-memory/hpse-carveout");
if (!hpse_carveout_node) {
dev_err(dev, "hpse-carveout node missing\n");
return -EINVAL;
}
// Read the 'reg' property from the oesp-mailbox node
if (of_property_read_u64(hpse_carveout_node, "reg", &hpse_carveout_base_iova)) {
dev_err(dev, "reg property missing in hpse-carveout\n");
return -EINVAL;
}
if (!hpse_carveout_base_iova) {
dev_err(dev, "hpse carveout iova is NULL\n");
return -EINVAL;
}
// The size is the second u64 value in the 'reg' property
if (of_property_read_u64_index(hpse_carveout_node, "reg", 1, &hpse_carveout_size)) {
dev_err(dev, "reg size missing in hpse-carveout\n");
return -EINVAL;
}
hpse_carveout_base_va = devm_memremap(dev, hpse_carveout_base_iova,
hpse_carveout_size, MEMREMAP_WB);
if (IS_ERR_OR_NULL(hpse_carveout_base_va))
return -ENOMEM;
// Locate the oesp-mailbox node
oesp_mbox_node = of_find_node_by_name(hpse_node, "oesp-mailbox");
if (!oesp_mbox_node) {
dev_err(dev, "oesp-mailbox node missing\n");
return -EINVAL;
}
// Read the 'reg' property from the oesp-mailbox node
if (of_property_read_u64(oesp_mbox_node, "reg", &oesp_mbox_reg_base_iova)) {
dev_err(dev, "reg property missing for oesp mailbox\n");
return -EINVAL;
}
if (!oesp_mbox_reg_base_iova) {
dev_err(dev, "oesp reg base is NULL\n");
return -EINVAL;
}
// The size is the second u64 value in the 'reg' property
if (of_property_read_u64_index(oesp_mbox_node, "reg", 1, &oesp_mbox_reg_size)) {
dev_err(dev, "reg size missing for oesp mailbox\n");
return -EINVAL;
}
oesp_mbox_reg_base_va = devm_ioremap(dev, oesp_mbox_reg_base_iova, oesp_mbox_reg_size);
if (!oesp_mbox_reg_base_va) {
dev_err(dev, "ioremap failed\n");
return -EINVAL;
}
/* Get IRQ from tegra-hpse node */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "failed to get irq from tegra-hpse node: %d\n", irq);
return irq;
}
ret = devm_request_irq(dev, irq, tegra_hpse_irq_handler,
IRQF_ONESHOT, dev_name(dev), &g_mbox_ctx);
if (ret) {
dev_err(dev, "Failed to request IRQ %d: %d\n", irq, ret);
return ret;
}
g_mbox_ctx.hpse_carveout_base_va = hpse_carveout_base_va;
g_mbox_ctx.hpse_carveout_base_iova = hpse_carveout_base_iova;
g_mbox_ctx.hpse_carveout_size = hpse_carveout_size;
g_mbox_ctx.oesp_mbox_reg_base_va = oesp_mbox_reg_base_va;
g_mbox_ctx.oesp_mbox_reg_size = oesp_mbox_reg_size;
return 0;
}

View File

@@ -0,0 +1,63 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/* Public interface of the OESP mailbox transport used by the nvtzvault
 * driver to exchange serialized requests/responses with the SA. */
#ifndef OESP_MAILBOX_H
#define OESP_MAILBOX_H
#include <linux/types.h>
#include <linux/device.h>
#include <linux/platform_device.h>
/**
 * @brief Context structure for OESP mailbox operations
 *
 * Contains the virtual address of mailbox registers and
 * IOVA of the HPSE carveout memory used for communication.
 *
 * NOTE(review): the mailbox implementation keeps its own private context
 * struct; this type appears unused by it — confirm whether any external
 * caller still needs it.
 */
struct oesp_mailbox_context {
	/** @brief Virtual address of the OESP mailbox registers */
	uint8_t *reg_base_va;
	/** @brief IOVA (I/O Virtual Address) of HPSE carveout memory */
	uint64_t hpse_carveout_iova;
};
/**
 * @brief Register HPSE mailbox interrupt handler
 *
 * NOTE(review): no definition of this function is visible in the mailbox
 * implementation — the IRQ appears to be wired up inside
 * oesp_mailbox_init() via devm_request_irq(); confirm whether this
 * prototype is still needed.
 *
 * @param[in] dev Platform device pointer
 * @param[in] irq IRQ number to register
 *
 * @return 0 on success, negative error code on failure
 */
int32_t oesp_mailbox_register_irq(struct device *dev, int irq);
/**
 * @brief Sends a request through the OESP mailbox and waits for response
 *
 * Triggers communication with the SA by writing the request to the mailbox
 * registers and waits for the response. This is a blocking call that waits
 * until response is received or timeout occurs.
 *
 * @param[in] buf_ptr Pointer to the buffer containing request data
 * @param[in] buf_len Length of the request data
 * @param[in] task_opcode Task opcode to be sent to the SA
 * @param[in] driver_id Driver ID to be sent to the SA
 * @return 0 on successful response
 *         Negative error code on failure:
 *         -ETIMEDOUT if response not received within timeout period
 */
int32_t oesp_mailbox_send_and_read(void *buf_ptr, uint32_t buf_len, uint32_t task_opcode,
				   uint32_t driver_id);
/**
 * @brief Initialize HPSE mailbox context and register interrupt handler
 *
 * @param[in] pdev Platform device pointer
 *
 * @return 0 on success, negative error code on failure
 *         -ENODEV if HPSE node is not found
 *         -EINVAL if any required properties are missing
 *         -ENOMEM if memory allocation fails
 */
int32_t oesp_mailbox_init(struct platform_device *pdev);
#endif

View File

@@ -0,0 +1,61 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */
/* Userspace IOCTL interface for the nvtzvault per-TA device nodes.
 *
 * NOTE(review): UAPI structs below use void* and size_t, whose widths
 * differ between 32-bit and 64-bit userspace; fixed-width types (__u64
 * for the pointer and size) would make the ABI compat-safe — TODO confirm
 * whether 32-bit userspace must be supported before changing.
 */
#ifndef __UAPI_NVTZVAULT_IOCTL_H
#define __UAPI_NVTZVAULT_IOCTL_H
#include <asm-generic/ioctl.h>
#define NVTZVAULT_IOC_MAGIC 0x99
#define NVTZVAULT_CMDID_OPEN_SESSION (0x01U)
#define NVTZVAULT_CMDID_INVOKE_CMD (0x02U)
#define NVTZVAULT_CMDID_CLOSE_SESSION (0x03U)
#define NVTZVAULT_TA_MAX_PARAMS (8U)
#define NVTZVAULT_TA_UUID_LEN (16U)
/* Memory-reference parameter: userspace buffer pointer plus its length */
struct nvtzvault_teec_memref {
	void *buffer;
	size_t size;
};
/* Value parameter: two opaque 32-bit words */
struct nvtzvault_teec_value {
	uint32_t a;
	uint32_t b;
};
struct nvtzvault_teec_parameter {
	struct nvtzvault_teec_memref memref;
	struct nvtzvault_teec_value value;
};
/* Operation payload shared by open-session and invoke-command requests */
struct nvtzvault_teec_operation {
	uint32_t started;
	uint32_t param_types;
	struct nvtzvault_teec_parameter params[NVTZVAULT_TA_MAX_PARAMS];
};
/* session_id is filled in by the driver on successful open */
struct nvtzvault_open_session_ctl {
	uint8_t uuid[NVTZVAULT_TA_UUID_LEN];
	struct nvtzvault_teec_operation operation;
	uint32_t session_id;
};
/* NOTE(review): the driver copies these structures back to userspace, so
 * the direction arguably should be _IOWR rather than _IOW; changing it
 * alters the ioctl numbers and breaks existing binaries — confirm before
 * touching. */
#define NVTZVAULT_IOCTL_OPEN_SESSION _IOW(NVTZVAULT_IOC_MAGIC, NVTZVAULT_CMDID_OPEN_SESSION, \
					  struct nvtzvault_open_session_ctl)
struct nvtzvault_invoke_cmd_ctl {
	uint32_t session_id;
	uint32_t command_id;
	struct nvtzvault_teec_operation operation;
};
#define NVTZVAULT_IOCTL_INVOKE_CMD _IOW(NVTZVAULT_IOC_MAGIC, NVTZVAULT_CMDID_INVOKE_CMD, \
					struct nvtzvault_invoke_cmd_ctl)
struct nvtzvault_close_session_ctl {
	uint32_t session_id;
};
#define NVTZVAULT_IOCTL_CLOSE_SESSION _IOW(NVTZVAULT_IOC_MAGIC, NVTZVAULT_CMDID_CLOSE_SESSION, \
					   struct nvtzvault_close_session_ctl)
#endif