Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git
nvidia-oot: protect IVC APIs using mutex
Concurrent access to the IVC framework APIs causes a race that can crash the system, so protect those APIs with a mutex. Also remove the req_lock mutex, which was unnecessary since the shared resources it guarded are already protected by the ivc_lock mutex.

Bug 3937188

Signed-off-by: Manish Bhardwaj <mbhardwaj@nvidia.com>
Change-Id: Ie49161b7f2659f9c44352d50f950d8f5a3cae32c
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2856249
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: Suresh Venkatachalam <skathirampat@nvidia.com>
Reviewed-by: Sandeep Trasi <strasi@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Commit: a51a3b4d6f
Parent: a62b3e3686
Committed by: mobile promotions
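The substance of the change, ahead of the diff: every path that touches the IVC channel (the channel reset, the config command exchange, and the reads done from the init worker) must now hold vblkdev->ivc_lock. A minimal sketch of that pattern, assuming a worker that drains responses; the function name and the vblk_handle_one_response() helper are hypothetical, while tegra_hv_ivc_can_read() and the ivc_lock field are taken from the patch below:

/* Sketch only: serialize IVC framework calls behind ivc_lock.
 * vblk_handle_one_response() is a hypothetical helper standing in
 * for the response-completion logic, which is not part of this diff. */
static void vblk_drain_responses(struct vblk_dev *vblkdev)
{
	mutex_lock(&vblkdev->ivc_lock);
	while (tegra_hv_ivc_can_read(vblkdev->ivck))
		vblk_handle_one_response(vblkdev);
	mutex_unlock(&vblkdev->ivc_lock);
}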
@@ -56,8 +56,6 @@ static struct vsc_request *vblk_get_req(struct vblk_dev *vblkdev)
 	struct vsc_request *req = NULL;
 	unsigned long bit;
 
-	mutex_lock(&vblkdev->req_lock);
-
 	if (vblkdev->queue_state != VBLK_QUEUE_ACTIVE)
 		goto exit;
 
@@ -70,7 +68,6 @@ static struct vsc_request *vblk_get_req(struct vblk_dev *vblkdev)
 	}
 
 exit:
-	mutex_unlock(&vblkdev->req_lock);
 	return req;
 }
 
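With req_lock gone, vblk_get_req() relies on its caller for serialization (per the commit message, via ivc_lock). A hypothetical hardening, not in this patch, would state that contract explicitly at the top of the function:

/* Hypothetical addition, not in the patch: assert the locking
 * contract now that req_lock no longer guards the request bitmap. */
lockdep_assert_held(&vblkdev->ivc_lock);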
@@ -82,7 +79,6 @@ static struct vsc_request *vblk_get_req_by_sr_num(struct vblk_dev *vblkdev,
 	if (num >= vblkdev->max_requests)
 		return NULL;
 
-	mutex_lock(&vblkdev->req_lock);
 	req = &vblkdev->reqs[num];
 	if (test_bit(req->id, vblkdev->pending_reqs) == 0) {
 		dev_err(vblkdev->device,
@@ -90,7 +86,6 @@ static struct vsc_request *vblk_get_req_by_sr_num(struct vblk_dev *vblkdev,
 			req->id);
 		req = NULL;
 	}
-	mutex_unlock(&vblkdev->req_lock);
 
 	/* Assuming serial number is same as index into request array */
 	return req;
@@ -116,12 +111,11 @@ static void vblk_put_req(struct vsc_request *req)
 		return;
 	}
 
-	mutex_lock(&vblkdev->req_lock);
 	if (req != &vblkdev->reqs[req->id]) {
 		dev_err(vblkdev->device,
 			"Request Index %d does not match with the request!\n",
 			req->id);
-		goto exit;
+		return;
 	}
 
 	if (test_bit(req->id, vblkdev->pending_reqs) == 0) {
@@ -140,8 +134,6 @@ static void vblk_put_req(struct vsc_request *req)
 			complete(&vblkdev->req_queue_empty);
 		}
 	}
-exit:
-	mutex_unlock(&vblkdev->req_lock);
 }
 
 static int vblk_send_config_cmd(struct vblk_dev *vblkdev)
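The goto-to-return change in vblk_put_req() falls straight out of the lock removal: once the early-exit path has no lock to drop, the unwind label has no job. The general shape, using hypothetical names (struct state, bad_request(), do_work()):

/* Before: every exit funnels through a label that drops the lock. */
void op_before(struct state *s)
{
	mutex_lock(&s->req_lock);
	if (bad_request(s))
		goto exit;
	do_work(s);
exit:
	mutex_unlock(&s->req_lock);
}

/* After: nothing to unwind, so the error path returns directly. */
void op_after(struct state *s)
{
	if (bad_request(s))
		return;
	do_work(s);
}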
@@ -911,11 +903,6 @@ static void setup_device(struct vblk_dev *vblkdev)
 		vblkdev->config.blk_config.num_blks *
 		vblkdev->config.blk_config.hardblk_size;
 
-	spin_lock_init(&vblkdev->lock);
-	spin_lock_init(&vblkdev->queue_lock);
-	mutex_init(&vblkdev->ioctl_lock);
-	mutex_init(&vblkdev->ivc_lock);
-
 	memset(&vblkdev->tag_set, 0, sizeof(vblkdev->tag_set));
 	vblkdev->tag_set.ops = &vblk_mq_ops;
 	vblkdev->tag_set.nr_hw_queues = 1;
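Moving these initializers out of setup_device() is an ordering fix as much as a cleanup: with this patch, vblk_init_device() takes ivc_lock as soon as it runs, before setup_device() is ever reached, so the locks must be live earlier, in probe (the @@ -1310 hunk below). The invariant in sketch form; the scheduling call is shown only as an assumption, since the trigger site is not in this diff:

mutex_init(&vblkdev->ivc_lock);              /* now done in probe */
INIT_WORK(&vblkdev->init, vblk_init_device); /* the worker takes ivc_lock */
schedule_work(&vblkdev->init);               /* hypothetical trigger site */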
@@ -1090,7 +1077,6 @@ static void setup_device(struct vblk_dev *vblkdev)
 			"maximum requests set to 0!\n");
 		return;
 	}
-	mutex_init(&vblkdev->req_lock);
 
 	vblkdev->max_requests = max_requests;
 	vblkdev->max_ioctl_requests = max_ioctl_requests;
@@ -1214,17 +1200,25 @@ static void vblk_init_device(struct work_struct *ws)
 {
 	struct vblk_dev *vblkdev = container_of(ws, struct vblk_dev, init);
 
+	mutex_lock(&vblkdev->ivc_lock);
 	/* wait for ivc channel reset to finish */
-	if (tegra_hv_ivc_channel_notified(vblkdev->ivck) != 0)
+	if (tegra_hv_ivc_channel_notified(vblkdev->ivck) != 0) {
+		mutex_unlock(&vblkdev->ivc_lock);
 		return; /* this will be rescheduled by irq handler */
+	}
 
 	if (tegra_hv_ivc_can_read(vblkdev->ivck) && !vblkdev->initialized) {
-		if (vblk_get_configinfo(vblkdev))
+		if (vblk_get_configinfo(vblkdev)) {
+			mutex_unlock(&vblkdev->ivc_lock);
 			return;
+		}
 
+		mutex_unlock(&vblkdev->ivc_lock);
 		vblkdev->initialized = true;
 		setup_device(vblkdev);
+		return;
 	}
+	mutex_unlock(&vblkdev->ivc_lock);
 }
 
 static irqreturn_t ivc_irq_handler(int irq, void *data)
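The patched vblk_init_device() releases ivc_lock on each of its four exit paths. For comparison, the same control flow can be written with a single unlock site using the usual kernel goto idiom. This is a sketch of an alternative, not what the patch does, and it assumes vblk_get_configinfo() returns 0 on success, as the error check above implies:

static void vblk_init_device_alt(struct work_struct *ws)
{
	struct vblk_dev *vblkdev = container_of(ws, struct vblk_dev, init);
	bool do_setup = false;

	mutex_lock(&vblkdev->ivc_lock);
	/* wait for ivc channel reset to finish */
	if (tegra_hv_ivc_channel_notified(vblkdev->ivck) != 0)
		goto out; /* rescheduled by the irq handler */

	if (tegra_hv_ivc_can_read(vblkdev->ivck) && !vblkdev->initialized)
		do_setup = (vblk_get_configinfo(vblkdev) == 0);
out:
	mutex_unlock(&vblkdev->ivc_lock);

	if (do_setup) {
		vblkdev->initialized = true;
		setup_device(vblkdev); /* called unlocked, as in the patch */
	}
}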
@@ -1310,6 +1304,11 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 #endif
 	vblkdev->queue_state = VBLK_QUEUE_ACTIVE;
 
+	spin_lock_init(&vblkdev->lock);
+	spin_lock_init(&vblkdev->queue_lock);
+	mutex_init(&vblkdev->ioctl_lock);
+	mutex_init(&vblkdev->ivc_lock);
+
 	INIT_WORK(&vblkdev->init, vblk_init_device);
 	INIT_WORK(&vblkdev->work, vblk_request_work);
 	/* creating and initializing the an internal request list */
@@ -1322,13 +1321,15 @@ static int tegra_hv_vblk_probe(struct platform_device *pdev)
 		goto free_wq;
 	}
 
+	mutex_lock(&vblkdev->ivc_lock);
 	tegra_hv_ivc_channel_reset(vblkdev->ivck);
 	if (vblk_send_config_cmd(vblkdev)) {
 		dev_err(dev, "Failed to send config cmd\n");
 		ret = -EACCES;
+		mutex_unlock(&vblkdev->ivc_lock);
 		goto free_wq;
 	}
+	mutex_unlock(&vblkdev->ivc_lock);
 	return 0;
 
 free_wq:
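Why probe needs the lock here: once the IVC irq is wired up (assumed to happen earlier in probe; the request_irq call is not visible in this diff), the irq handler can schedule vblk_init_device() at any time, and that worker now also takes ivc_lock. Holding the mutex across the reset and the config command keeps the two from interleaving:

/* Sketch of the race being closed (timeline in comment form):
 *
 *   probe thread                        init worker (irq-scheduled)
 *   ------------                        ----------------------------
 *   mutex_lock(&ivc_lock)
 *   tegra_hv_ivc_channel_reset(...)     mutex_lock(&ivc_lock) blocks
 *   vblk_send_config_cmd(...)              ...
 *   mutex_unlock(&ivc_lock)             worker proceeds on a quiesced channel
 */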
@@ -1378,13 +1379,13 @@ static int tegra_hv_vblk_suspend(struct device *dev)
 		blk_mq_stop_hw_queues(vblkdev->queue);
 		spin_unlock_irqrestore(&vblkdev->queue->queue_lock, flags);
 
-		mutex_lock(&vblkdev->req_lock);
+		spin_lock(&vblkdev->lock);
 		vblkdev->queue_state = VBLK_QUEUE_SUSPENDED;
 
 		/* Mark the queue as empty if inflight requests are 0 */
 		if (vblkdev->inflight_reqs == 0)
 			complete(&vblkdev->req_queue_empty);
-		mutex_unlock(&vblkdev->req_lock);
+		spin_unlock(&vblkdev->lock);
 
 		wait_for_completion(&vblkdev->req_queue_empty);
 		disable_irq(vblkdev->ivck->irq);
@@ -1406,10 +1407,10 @@ static int tegra_hv_vblk_resume(struct device *dev)
 	unsigned long flags;
 
 	if (vblkdev->queue) {
-		mutex_lock(&vblkdev->req_lock);
+		spin_lock(&vblkdev->lock);
 		vblkdev->queue_state = VBLK_QUEUE_ACTIVE;
 		reinit_completion(&vblkdev->req_queue_empty);
-		mutex_unlock(&vblkdev->req_lock);
+		spin_unlock(&vblkdev->lock);
 
 		enable_irq(vblkdev->ivck->irq);
 
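In suspend and resume, the queue_state update and the inflight check move from the deleted req_lock mutex to the pre-existing vblkdev->lock spinlock (initialized in probe, per the @@ -1310 hunk above). The critical sections are a few loads and stores plus a complete() or reinit_completion(), none of which sleep, so a spinlock is the cheaper fit and can also be taken from contexts that must not sleep. Consolidated from the suspend hunk above:

/* A short, non-sleeping critical section; complete() is safe here. */
spin_lock(&vblkdev->lock);
vblkdev->queue_state = VBLK_QUEUE_SUSPENDED;
/* Mark the queue as empty if inflight requests are 0 */
if (vblkdev->inflight_reqs == 0)
	complete(&vblkdev->req_queue_empty);
spin_unlock(&vblkdev->lock);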
@@ -104,7 +104,6 @@ struct vblk_dev {
 	uint32_t hsierror_status;
 	struct completion hsierror_handle;
 #endif
-	struct mutex req_lock;
 	struct mutex ivc_lock;
 	enum vblk_queue_state queue_state;
 	struct completion req_queue_empty;