NVVSE: protect shared resources through mutex

Concurrent access to a shared variable was intermittently causing the issue.
Update the conditions that wait on and handle the shared variable, and add
mutex locks to protect the shared resources.
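The locking pattern applied here is to snapshot the shared interrupt state
under the new irq_state_lock into a local copy (local_irq_state) and then act
on that copy, instead of dereferencing the shared variable without the lock
held. The stand-alone userspace sketch below illustrates the same
snapshot-under-lock idiom with pthreads; the names (shared_state, state_lock,
reader, writer) are hypothetical and this is not the driver code itself.

/* Illustrative userspace analogue of the snapshot-under-lock pattern;
 * all identifiers here are invented for the example, not taken from
 * the driver.
 */
#include <pthread.h>
#include <stdio.h>

enum irq_state { NO_INTERRUPT, FIRST_REQ_INTERRUPT, INTERMEDIATE_REQ_INTERRUPT };

static enum irq_state shared_state = NO_INTERRUPT;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void *writer(void *arg)
{
	/* Writer only updates the shared state while holding the lock. */
	pthread_mutex_lock(&state_lock);
	shared_state = INTERMEDIATE_REQ_INTERRUPT;
	pthread_mutex_unlock(&state_lock);
	return NULL;
}

static void *reader(void *arg)
{
	/* Reader takes a consistent snapshot under the lock, then acts
	 * on the local copy instead of re-reading the shared variable.
	 */
	enum irq_state local_state;

	pthread_mutex_lock(&state_lock);
	local_state = shared_state;
	pthread_mutex_unlock(&state_lock);

	if (local_state == INTERMEDIATE_REQ_INTERRUPT)
		printf("snapshot saw INTERMEDIATE_REQ_INTERRUPT\n");
	else
		printf("snapshot saw state %d\n", local_state);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

In the driver, the same pattern appears in read_and_validate_valid_msg(),
tegra_hv_vse_safety_send_ivc_wait() and tegra_vse_kthread(), using
mutex_lock()/mutex_unlock() on crypto_to_ivc_map[node_id].irq_state_lock.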

Bug 4155882
Bug 4159212
Jira ESSS-707

Change-Id: I3b50e39abf2dfec2ef33b813c7227fcd0634222a
Signed-off-by: Advaya Andhare <aandhare@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2928908
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Manish Bhardwaj <mbhardwaj@nvidia.com>
Reviewed-by: Leo Chiu <lchiu@nvidia.com>
Reviewed-by: Vipin Kumar <vipink@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author: Advaya Andhare
Date: 2023-06-27 14:45:10 +00:00
Committed by: mobile promotions
Parent: 967fa5d4b4
Commit: 883ddf60ca
2 changed files with 41 additions and 15 deletions


@@ -664,10 +664,15 @@ static int read_and_validate_valid_msg(
 	int read_size = -1, err = 0;
 	size_t size_ivc_msg = sizeof(struct tegra_virtual_se_ivc_msg_t);

-	irq_state = &(se_dev->crypto_to_ivc_map[node_id].wait_interrupt);
+	mutex_lock(&(g_crypto_to_ivc_map[node_id].irq_state_lock));
+	irq_state = &(g_crypto_to_ivc_map[node_id].wait_interrupt);
+	mutex_unlock(&(g_crypto_to_ivc_map[node_id].irq_state_lock));

 	if (!tegra_hv_ivc_can_read(pivck)) {
+		mutex_lock(&(g_crypto_to_ivc_map[node_id].irq_state_lock));
 		*irq_state = INTERMEDIATE_REQ_INTERRUPT;
-		dev_err(se_dev->dev, "%s(): no valid message, await interrupt.\n", __func__);
+		mutex_unlock(&(g_crypto_to_ivc_map[node_id].irq_state_lock));
+		dev_info(se_dev->dev, "%s(): no valid message, await interrupt.\n", __func__);
 		return -EAGAIN;
 	}
@@ -801,7 +806,7 @@ static int tegra_hv_vse_safety_send_ivc_wait(
 	int err;
 	bool is_dummy = false;
 	u64 time_left;
-	enum ivc_irq_state *irq_state;
+	enum ivc_irq_state *irq_state, local_irq_state;

 	mutex_lock(&g_crypto_to_ivc_map[node_id].se_ivc_lock);
@@ -830,8 +835,11 @@ static int tegra_hv_vse_safety_send_ivc_wait(
 		goto exit;
 	}

+	mutex_lock(&(g_crypto_to_ivc_map[node_id].irq_state_lock));
 	irq_state = &(se_dev->crypto_to_ivc_map[node_id].wait_interrupt);
-	if (*irq_state == NO_INTERRUPT) {
+	local_irq_state = *irq_state;
+	mutex_unlock(&(g_crypto_to_ivc_map[node_id].irq_state_lock));
+	if (local_irq_state == NO_INTERRUPT) {
 		err = read_and_validate_dummy_msg(se_dev, pivck, node_id, &is_dummy);
 		if (err != 0) {
 			dev_err(se_dev->dev, "Failed to read and validate dummy message.\n");
@@ -843,9 +851,9 @@ static int tegra_hv_vse_safety_send_ivc_wait(
 			dev_err(se_dev->dev, "Failed to read & validate valid message.\n");
 			goto exit;
 		}
-		if (*irq_state == INTERMEDIATE_REQ_INTERRUPT) {
+		if (err == -EAGAIN) {
 			err = 0;
-			pr_debug("%s(): wait_interrupt = %u", __func__, *irq_state);
+			pr_debug("%s(): wait_interrupt = %u", __func__, local_irq_state);
 			time_left = wait_for_completion_timeout(&priv->alg_complete,
 					TEGRA_HV_VSE_TIMEOUT);
 			if (time_left == 0) {
@@ -854,7 +862,7 @@ static int tegra_hv_vse_safety_send_ivc_wait(
 				goto exit;
 			}
 		}
-		pr_debug("%s(): wait_interrupt = %u", __func__, *irq_state);
+		pr_debug("%s(): wait_interrupt = %u", __func__, local_irq_state);
 	} else {
 		dev_err(se_dev->dev,
 			"%s(): Invalid resonse sequence, expected dummy message.\n",
@@ -4629,10 +4637,9 @@ static int tegra_vse_kthread(void *data)
 	int ret;
 	bool is_dummy = false;
 	size_t size_ivc_msg = sizeof(struct tegra_virtual_se_ivc_msg_t);
-	enum ivc_irq_state *irq_state;
+	enum ivc_irq_state *irq_state, local_irq_state;

 	se_dev = g_virtual_se_dev[g_crypto_to_ivc_map[node_id].se_engine];
-	irq_state = &(se_dev->crypto_to_ivc_map[node_id].wait_interrupt);

 	ivc_msg = devm_kzalloc(se_dev->dev, size_ivc_msg, GFP_KERNEL);
 	if (!ivc_msg)
@@ -4671,9 +4678,13 @@ static int tegra_vse_kthread(void *data)
 			continue;
 		}

-		while (tegra_hv_ivc_can_read(pivck)) {
-			pr_debug("%s(): wait_interrupt = %u", __func__, *irq_state);
-			if (*irq_state == INTERMEDIATE_REQ_INTERRUPT) {
+		mutex_lock(&(se_dev->crypto_to_ivc_map[node_id].irq_state_lock));
+		irq_state = &(se_dev->crypto_to_ivc_map[node_id].wait_interrupt);
+		local_irq_state = *irq_state;
+		mutex_unlock(&(se_dev->crypto_to_ivc_map[node_id].irq_state_lock));
+		while (tegra_hv_ivc_can_read(pivck) && local_irq_state != NO_INTERRUPT) {
+			pr_debug("%s(): wait_interrupt = %u", __func__, local_irq_state);
+			if (local_irq_state == INTERMEDIATE_REQ_INTERRUPT) {
 				err = read_and_validate_valid_msg(se_dev, pivck, node_id,
 						&is_dummy, true);
 				if (err != 0) {
@@ -4681,11 +4692,15 @@ static int tegra_vse_kthread(void *data)
 						"%s(): Unable to read validate message",
 						__func__);
 				}
+				mutex_lock(&(se_dev->crypto_to_ivc_map[node_id].irq_state_lock));
 				*irq_state = NO_INTERRUPT;
+				local_irq_state = *irq_state;
+				mutex_unlock(&(se_dev->crypto_to_ivc_map[node_id].irq_state_lock));
 				pr_debug("%s():%d wait_interrupt = %u\n",
-					__func__, __LINE__, *irq_state);
+					__func__, __LINE__, local_irq_state);
 				break;
-			} else if (*irq_state == FIRST_REQ_INTERRUPT) {
+			} else if (local_irq_state == FIRST_REQ_INTERRUPT) {
 				err = read_and_validate_dummy_msg(se_dev, pivck, node_id,
 						&is_dummy);
 				if (err != 0) {
@@ -4695,7 +4710,12 @@ static int tegra_vse_kthread(void *data)
 					continue;
 				}
 				if (is_dummy == true) {
+					mutex_lock(
+						&(se_dev->crypto_to_ivc_map[node_id].irq_state_lock));
 					*irq_state = INTERMEDIATE_REQ_INTERRUPT;
+					local_irq_state = *irq_state;
+					mutex_unlock(
+						&(se_dev->crypto_to_ivc_map[node_id].irq_state_lock));
 					pr_debug("%s():%d Dummy message read. Read valid message.",
 						__func__, __LINE__);
 					continue;
@@ -4703,6 +4723,9 @@ static int tegra_vse_kthread(void *data)
 					dev_err(se_dev->dev, "Invalid response sequence");
 					break;
 				}
+			} else {
+				dev_err(se_dev->dev, "Invalid irq state - %u", local_irq_state);
+				return -EINVAL;
 			}
 		}
 	}
@@ -4988,6 +5011,7 @@ static int tegra_hv_vse_safety_probe(struct platform_device *pdev)
 		tegra_hv_ivc_channel_reset(crypto_dev->ivck);
 		init_completion(&crypto_dev->tegra_vse_complete);
 		mutex_init(&crypto_dev->se_ivc_lock);
+		mutex_init(&crypto_dev->irq_state_lock);

 		crypto_dev->tegra_vse_task = kthread_run(tegra_vse_kthread, &crypto_dev->node_id,
 				"tegra_vse_kthread-%u", node_id);
@@ -5124,7 +5148,8 @@ static void tegra_hv_vse_safety_shutdown(struct platform_device *pdev)
 		if (g_crypto_to_ivc_map[cnt].se_engine == se_dev->engine_id
 				&& g_crypto_to_ivc_map[cnt].ivck != NULL) {
 			/* Wait for SE server to be free*/
-			while (mutex_is_locked(&g_crypto_to_ivc_map[cnt].se_ivc_lock))
+			while (mutex_is_locked(&g_crypto_to_ivc_map[cnt].se_ivc_lock)
+					|| mutex_is_locked(&g_crypto_to_ivc_map[cnt].irq_state_lock))
 				usleep_range(8, 10);
 		}
 	}


@@ -46,6 +46,7 @@ struct crypto_dev_to_ivc_map {
 	 * 2: awaiting actual message, wait for interrupt
 	 */
 	enum ivc_irq_state wait_interrupt;
+	struct mutex irq_state_lock;
 };

 struct tegra_virtual_se_dev {