nvtzvault: take lock before closing session

- need to take the lock to avoid race conditions when accessing
  the mailbox.
- don't use a spinlock in IRQ context as the IRQ is not shared
  and only one request is processed at a time.
- remove debug print.

Bug 5225204
Bug 5275408

Change-Id: I2de5a07d06568012d6557d3a32126478b6ddaf15
Signed-off-by: Nagaraj P N <nagarajp@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3356286
(cherry picked from commit 521e75b7c3cbc5f4724a8a0f3c11c3a2940383b2)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/3364010
GVS: buildbot_gerritrpt <buildbot_gerritrpt@nvidia.com>
Reviewed-by: Sandeep Trasi <strasi@nvidia.com>
Reviewed-by: Leo Chiu <lchiu@nvidia.com>
Reviewed-by: svcacv <svcacv@nvidia.com>
This commit is contained in:
Nagaraj P N
2025-05-06 16:42:11 +05:30
committed by Jon Hunter
parent ef0bf7c94c
commit 7e77b140e3
2 changed files with 19 additions and 16 deletions

View File

@@ -198,7 +198,7 @@ static int nvtzvault_open_session(struct nvtzvault_ctx *ctx,
ret = nvtzvault_tee_translate_saerror_to_syserror( ret = nvtzvault_tee_translate_saerror_to_syserror(
resp_hdr.result); resp_hdr.result);
if (ret != 0) { if (ret != 0) {
NVTZVAULT_ERR("%s: SA returned error: %d(%x)\n", __func__, resp_hdr.result, NVTZVAULT_ERR("%s: SA returned error: %d(0x%x)\n", __func__, resp_hdr.result,
resp_hdr.result); resp_hdr.result);
return ret; return ret;
} }
@@ -284,7 +284,7 @@ static int nvtzvault_invoke_cmd(struct nvtzvault_ctx *ctx,
ret = nvtzvault_tee_translate_saerror_to_syserror( ret = nvtzvault_tee_translate_saerror_to_syserror(
resp_hdr.result); resp_hdr.result);
if (ret != 0) { if (ret != 0) {
NVTZVAULT_ERR("%s: SA returned error: %d(%x)\n", __func__, resp_hdr.result, NVTZVAULT_ERR("%s: SA returned error: %d(0x%x)\n", __func__, resp_hdr.result,
resp_hdr.result); resp_hdr.result);
return ret; return ret;
} }
@@ -360,7 +360,7 @@ static int nvtzvault_close_session(struct nvtzvault_ctx *ctx,
// Only clear session if close was successful // Only clear session if close was successful
set_session_closed(ctx, close_session_ctl->session_id); set_session_closed(ctx, close_session_ctl->session_id);
} else { } else {
NVTZVAULT_ERR("%s: SA returned error: %d(%x)\n", __func__, resp_hdr.result, NVTZVAULT_ERR("%s: SA returned error: %d(0x%x)\n", __func__, resp_hdr.result,
resp_hdr.result); resp_hdr.result);
} }
@@ -406,13 +406,14 @@ static int nvtzvault_ta_dev_release(struct inode *inode, struct file *filp)
struct nvtzvault_close_session_ctl close_session_ctl; struct nvtzvault_close_session_ctl close_session_ctl;
if (atomic_read(&g_nvtzvault_dev.total_active_session_count) > 0) { if (atomic_read(&g_nvtzvault_dev.total_active_session_count) > 0) {
mutex_lock(&g_nvtzvault_dev.lock);
for (uint32_t i = 0; i < NVTZVAULT_MAX_SESSIONS; i++) { for (uint32_t i = 0; i < NVTZVAULT_MAX_SESSIONS; i++) {
if (is_session_open(ctx, i)) { if (is_session_open(ctx, i)) {
NVTZVAULT_ERR("%s: closing session %u\n", __func__, i);
close_session_ctl.session_id = i; close_session_ctl.session_id = i;
nvtzvault_close_session(ctx, &close_session_ctl); nvtzvault_close_session(ctx, &close_session_ctl);
} }
} }
mutex_unlock(&g_nvtzvault_dev.lock);
} }
kfree(ctx); kfree(ctx);
@@ -434,13 +435,14 @@ static long nvtzvault_ta_dev_ioctl(struct file *filp, unsigned int ioctl_num, un
return -EPERM; return -EPERM;
} }
mutex_lock(&g_nvtzvault_dev.lock);
if (atomic_read(&g_nvtzvault_dev.in_suspend_state)) { if (atomic_read(&g_nvtzvault_dev.in_suspend_state)) {
NVTZVAULT_ERR("%s(): device is in suspend state\n", __func__); NVTZVAULT_ERR("%s(): device is in suspend state\n", __func__);
return -EBUSY; ret = -EBUSY;
goto release_lock;
} }
mutex_lock(&g_nvtzvault_dev.lock);
switch (ioctl_num) { switch (ioctl_num) {
case NVTZVAULT_IOCTL_OPEN_SESSION: case NVTZVAULT_IOCTL_OPEN_SESSION:
open_session_ctl = kzalloc(sizeof(*open_session_ctl), GFP_KERNEL); open_session_ctl = kzalloc(sizeof(*open_session_ctl), GFP_KERNEL);
@@ -825,11 +827,17 @@ static int nvtzvault_suspend(struct device *dev)
/* Add print to log in nvlog buffer */ /* Add print to log in nvlog buffer */
dev_err(dev, "%s start\n", __func__); dev_err(dev, "%s start\n", __func__);
if (atomic_read(&g_nvtzvault_dev.total_active_session_count) > 0) mutex_lock(&g_nvtzvault_dev.lock);
if (atomic_read(&g_nvtzvault_dev.total_active_session_count) > 0) {
mutex_unlock(&g_nvtzvault_dev.lock);
return -EBUSY; return -EBUSY;
}
atomic_set(&g_nvtzvault_dev.in_suspend_state, 1); atomic_set(&g_nvtzvault_dev.in_suspend_state, 1);
mutex_unlock(&g_nvtzvault_dev.lock);
/* Add print to log in nvlog buffer */ /* Add print to log in nvlog buffer */
dev_err(dev, "%s done\n", __func__); dev_err(dev, "%s done\n", __func__);

View File

@@ -65,7 +65,6 @@ struct mbox_ctx {
uint64_t hpse_carveout_size; uint64_t hpse_carveout_size;
void *oesp_mbox_reg_base_va; void *oesp_mbox_reg_base_va;
uint64_t oesp_mbox_reg_size; uint64_t oesp_mbox_reg_size;
spinlock_t lock;
struct mbox_resp resp; struct mbox_resp resp;
} g_mbox_ctx; } g_mbox_ctx;
@@ -166,7 +165,6 @@ static irqreturn_t tegra_hpse_irq_handler(int irq, void *dev_id)
volatile u32 reg_val; volatile u32 reg_val;
u8 *oesp_reg_mem_ptr = g_mbox_ctx.oesp_mbox_reg_base_va; u8 *oesp_reg_mem_ptr = g_mbox_ctx.oesp_mbox_reg_base_va;
volatile struct mbox_resp *resp = &g_mbox_ctx.resp; volatile struct mbox_resp *resp = &g_mbox_ctx.resp;
unsigned long flags;
/* Read PSC control register to check interrupt status */ /* Read PSC control register to check interrupt status */
reg_val = readl(oesp_reg_mem_ptr + PSC_CTRL_REG_OFFSET); reg_val = readl(oesp_reg_mem_ptr + PSC_CTRL_REG_OFFSET);
@@ -175,13 +173,14 @@ static irqreturn_t tegra_hpse_irq_handler(int irq, void *dev_id)
if ((reg_val & MBOX_OUT_VALID) == 0U) if ((reg_val & MBOX_OUT_VALID) == 0U)
return IRQ_NONE; return IRQ_NONE;
spin_lock_irqsave(&g_mbox_ctx.lock, flags);
/* Read response registers */ /* Read response registers */
resp->task_opcode = readl(oesp_reg_mem_ptr + RESP_OPCODE_OFFSET); resp->task_opcode = readl(oesp_reg_mem_ptr + RESP_OPCODE_OFFSET);
resp->format_flag = readl(oesp_reg_mem_ptr + RESP_FORMAT_FLAG_OFFSET); resp->format_flag = readl(oesp_reg_mem_ptr + RESP_FORMAT_FLAG_OFFSET);
resp->status = readl(oesp_reg_mem_ptr + RESP_STATUS_OFFSET); resp->status = readl(oesp_reg_mem_ptr + RESP_STATUS_OFFSET);
/* Ensure register read is complete before acknowledging response */
rmb();
/* Set MBOX_OUT_DONE to acknowledge response */ /* Set MBOX_OUT_DONE to acknowledge response */
reg_val = readl(oesp_reg_mem_ptr + EXT_CTRL_OFFSET); reg_val = readl(oesp_reg_mem_ptr + EXT_CTRL_OFFSET);
reg_val |= MBOX_OUT_DONE; reg_val |= MBOX_OUT_DONE;
@@ -190,8 +189,6 @@ static irqreturn_t tegra_hpse_irq_handler(int irq, void *dev_id)
/* Signal completion to waiting thread */ /* Signal completion to waiting thread */
complete(&mbox_completion); complete(&mbox_completion);
spin_unlock_irqrestore(&g_mbox_ctx.lock, flags);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
@@ -277,8 +274,6 @@ int32_t oesp_mailbox_init(struct platform_device *pdev)
g_mbox_ctx.oesp_mbox_reg_base_va = oesp_mbox_reg_base_va; g_mbox_ctx.oesp_mbox_reg_base_va = oesp_mbox_reg_base_va;
g_mbox_ctx.oesp_mbox_reg_size = oesp_mbox_reg_size; g_mbox_ctx.oesp_mbox_reg_size = oesp_mbox_reg_size;
spin_lock_init(&g_mbox_ctx.lock);
/* Get IRQ from tegra-hpse node */ /* Get IRQ from tegra-hpse node */
irq = platform_get_irq(pdev, 0); irq = platform_get_irq(pdev, 0);
if (irq < 0) { if (irq < 0) {