mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: create separate mutex for IMEM and DMEM access
Access to IMEM and DMEM can proceed in parallel, since each has its own control and data registers. They therefore need not be serialized under a single copy_lock. Prepare separate mutex locks for each.

JIRA NVGPU-1993

Change-Id: Ie4bfcb6cef0259c6fb98a86bdbcc378ff5725ee5
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2030617
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
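To make the intent concrete: with a single copy_lock, an IMEM copy and a DMEM copy serialize even though they program disjoint control/data registers; with one lock per memory they can run concurrently. Below is a minimal stand-alone model of that pattern using POSIX threads. It is an illustration only: struct falcon_model, copy_to_imem_model and copy_to_dmem_model are hypothetical names, not nvgpu APIs.

/* Stand-alone model of per-memory locking (hypothetical names, not nvgpu code). */
#include <pthread.h>
#include <stdio.h>

struct falcon_model {
	pthread_mutex_t imem_lock;	/* models guarding the IMEM control/data registers */
	pthread_mutex_t dmem_lock;	/* models guarding the DMEM control/data registers */
};

static void copy_to_imem_model(struct falcon_model *flcn)
{
	pthread_mutex_lock(&flcn->imem_lock);
	/* ... program IMEM control register, write IMEM data register ... */
	pthread_mutex_unlock(&flcn->imem_lock);
}

static void copy_to_dmem_model(struct falcon_model *flcn)
{
	pthread_mutex_lock(&flcn->dmem_lock);
	/* ... program DMEM control register, write DMEM data register ... */
	pthread_mutex_unlock(&flcn->dmem_lock);
}

static void *imem_thread(void *arg) { copy_to_imem_model(arg); return NULL; }
static void *dmem_thread(void *arg) { copy_to_dmem_model(arg); return NULL; }

int main(void)
{
	struct falcon_model flcn;
	pthread_t t1, t2;

	pthread_mutex_init(&flcn.imem_lock, NULL);
	pthread_mutex_init(&flcn.dmem_lock, NULL);

	/* The two copies no longer contend on a single copy_lock. */
	pthread_create(&t1, NULL, imem_thread, &flcn);
	pthread_create(&t2, NULL, dmem_thread, &flcn);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	pthread_mutex_destroy(&flcn.imem_lock);
	pthread_mutex_destroy(&flcn.dmem_lock);
	printf("IMEM and DMEM copies completed under separate locks\n");
	return 0;
}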
committed by: mobile promotions
parent: f1c9c1ebc0
commit: ad1842d4a2
@@ -343,9 +343,9 @@ int nvgpu_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
 		goto exit;
 	}
 
-	nvgpu_mutex_acquire(&flcn->copy_lock);
+	nvgpu_mutex_acquire(&flcn->dmem_lock);
 	status = flcn_ops->copy_from_dmem(flcn, src, dst, size, port);
-	nvgpu_mutex_release(&flcn->copy_lock);
+	nvgpu_mutex_release(&flcn->dmem_lock);
 
 exit:
 	return status;
@@ -374,9 +374,9 @@ int nvgpu_falcon_copy_to_dmem(struct nvgpu_falcon *flcn,
 		goto exit;
 	}
 
-	nvgpu_mutex_acquire(&flcn->copy_lock);
+	nvgpu_mutex_acquire(&flcn->dmem_lock);
 	status = flcn_ops->copy_to_dmem(flcn, dst, src, size, port);
-	nvgpu_mutex_release(&flcn->copy_lock);
+	nvgpu_mutex_release(&flcn->dmem_lock);
 
 exit:
 	return status;
@@ -405,9 +405,9 @@ int nvgpu_falcon_copy_from_imem(struct nvgpu_falcon *flcn,
 		goto exit;
 	}
 
-	nvgpu_mutex_acquire(&flcn->copy_lock);
+	nvgpu_mutex_acquire(&flcn->imem_lock);
 	status = flcn_ops->copy_from_imem(flcn, src, dst, size, port);
-	nvgpu_mutex_release(&flcn->copy_lock);
+	nvgpu_mutex_release(&flcn->imem_lock);
 
 exit:
 	return status;
@@ -436,10 +436,10 @@ int nvgpu_falcon_copy_to_imem(struct nvgpu_falcon *flcn,
 		goto exit;
 	}
 
-	nvgpu_mutex_acquire(&flcn->copy_lock);
+	nvgpu_mutex_acquire(&flcn->imem_lock);
 	status = flcn_ops->copy_to_imem(flcn, dst, src, size, port,
 		sec, tag);
-	nvgpu_mutex_release(&flcn->copy_lock);
+	nvgpu_mutex_release(&flcn->imem_lock);
 
 exit:
 	return status;
@@ -735,9 +735,17 @@ int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
 		return -ENOMEM;
 	}
 
-	err = nvgpu_mutex_init(&flcn->copy_lock);
+	err = nvgpu_mutex_init(&flcn->imem_lock);
 	if (err != 0) {
-		nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
+		nvgpu_err(g, "Error in flcn.imem_lock mutex initialization");
 		nvgpu_kfree(g, flcn);
 		return err;
 	}
+
+	err = nvgpu_mutex_init(&flcn->dmem_lock);
+	if (err != 0) {
+		nvgpu_err(g, "Error in flcn.dmem_lock mutex initialization");
+		nvgpu_mutex_destroy(&flcn->imem_lock);
+		nvgpu_kfree(g, flcn);
+		return err;
+	}
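The new nvgpu_falcon_sw_init error path unwinds in reverse order of initialization: if dmem_lock fails to initialize, the already-initialized imem_lock is destroyed before the falcon struct is freed. For comparison, here is a sketch of the same logic written with goto-based unwinding, a common kernel idiom. It reuses the nvgpu helpers visible in the hunk above and assumes the driver's headers; sw_init_sketch itself is a hypothetical function, not the driver's actual code.

/* Hedged sketch: same init/unwind logic with goto-based cleanup.
 * Assumes nvgpu headers; sw_init_sketch is a hypothetical name. */
static int sw_init_sketch(struct gk20a *g, struct nvgpu_falcon *flcn)
{
	int err;

	err = nvgpu_mutex_init(&flcn->imem_lock);
	if (err != 0) {
		nvgpu_err(g, "Error in flcn.imem_lock mutex initialization");
		goto free_flcn;
	}

	err = nvgpu_mutex_init(&flcn->dmem_lock);
	if (err != 0) {
		nvgpu_err(g, "Error in flcn.dmem_lock mutex initialization");
		goto destroy_imem;
	}

	return 0;

destroy_imem:
	/* Unwind in reverse order of initialization. */
	nvgpu_mutex_destroy(&flcn->imem_lock);
free_flcn:
	nvgpu_kfree(g, flcn);
	return err;
}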
@@ -763,7 +771,8 @@ void nvgpu_falcon_sw_free(struct gk20a *g, u32 flcn_id)
 
 	flcn = *flcn_p;
 	gops->falcon.falcon_hal_sw_free(flcn);
-	nvgpu_mutex_destroy(&flcn->copy_lock);
+	nvgpu_mutex_destroy(&flcn->dmem_lock);
+	nvgpu_mutex_destroy(&flcn->imem_lock);
 	nvgpu_kfree(g, flcn);
 	*flcn_p = NULL;
 }
@@ -114,7 +114,8 @@ struct nvgpu_falcon {
 	u32 flcn_base;
 	bool is_falcon_supported;
 	bool is_interrupt_enabled;
-	struct nvgpu_mutex copy_lock;
+	struct nvgpu_mutex imem_lock;
+	struct nvgpu_mutex dmem_lock;
 	struct nvgpu_falcon_ops flcn_ops;
 	struct nvgpu_falcon_engine_dependency_ops flcn_engine_dep_ops;
 };
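A note on lock ordering: in the copy hunks above, each path acquires exactly one of imem_lock or dmem_lock and never both, so splitting copy_lock into two mutexes introduces no ABBA deadlock hazard. The locks also live inside struct nvgpu_falcon, so copies targeting different falcon instances were already independent of each other.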