gpu: nvgpu: add EMEM support enabled flag and EMEM mutex

Access to the falcon's EMEM has to be synchronized so that the EMEM control
and data registers are programmed atomically with respect to other callers.
Add an emem_lock mutex around the EMEM copy operations.
Not all falcons support EMEM, so initialize and destroy the mutex only when
the enabled flag emem_supported is set, which is currently done only for TU104.
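
For context, a minimal standalone sketch of the race being closed is below.
Everything in it is hypothetical (the fake_falcon struct, the plain-field
"registers", and the userspace pthread mutex standing in for nvgpu_mutex);
it is not the nvgpu HAL. It only illustrates the pattern: an EMEM transfer
programs a control offset and then streams words through a data register,
so two unsynchronized callers can clobber each other's control state
mid-transfer.

/* Standalone sketch (not the nvgpu HAL): shows why control + data
 * register accesses to EMEM must be serialized.  The "registers" here
 * are plain struct fields and all names are hypothetical. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_falcon {
	pthread_mutex_t emem_lock;  /* stands in for flcn->emem_lock      */
	uint32_t ememc;             /* control: current EMEM word offset  */
	uint32_t ememd;             /* data: last word written            */
	uint32_t emem[64];          /* backing store addressed by ememc   */
};

/* Program the control register, then stream words through the data
 * register.  The whole sequence must appear atomic to other callers,
 * which is what flcn->emem_lock provides in the patch. */
static void copy_to_emem(struct fake_falcon *flcn, uint32_t dst,
			 const uint32_t *src, size_t words)
{
	pthread_mutex_lock(&flcn->emem_lock);
	flcn->ememc = dst;                     /* set start offset      */
	for (size_t i = 0; i < words; i++) {
		flcn->ememd = src[i];          /* data register write   */
		flcn->emem[flcn->ememc++] = flcn->ememd;  /* auto-incr  */
	}
	pthread_mutex_unlock(&flcn->emem_lock);
}

static struct fake_falcon g_flcn = {
	.emem_lock = PTHREAD_MUTEX_INITIALIZER,
};

static void *writer(void *arg)
{
	uint32_t pattern[4];

	memset(pattern, (int)(uintptr_t)arg, sizeof(pattern));
	/* Two threads targeting different EMEM ranges: without the lock,
	 * the shared ememc offset could be clobbered mid-transfer. */
	copy_to_emem(&g_flcn, (uintptr_t)arg == 1 ? 0 : 16, pattern, 4);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, writer, (void *)1);
	pthread_create(&b, NULL, writer, (void *)2);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("emem[0]=0x%08x emem[16]=0x%08x\n",
	       g_flcn.emem[0], g_flcn.emem[16]);
	return 0;
}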

JIRA NVGPU-1993

Change-Id: Idaedfb564ea0068d4690a2717d7983eb2384a69f
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2030618
GVS: Gerrit_Virtual_Submit
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
3 files changed, 21 insertions(+)

@@ -251,7 +251,9 @@ int nvgpu_falcon_copy_from_emem(struct nvgpu_falcon *flcn,
 	flcn_dops = &flcn->flcn_engine_dep_ops;
 
 	if (flcn_dops->copy_from_emem != NULL) {
+		nvgpu_mutex_acquire(&flcn->emem_lock);
 		status = flcn_dops->copy_from_emem(g, src, dst, size, port);
+		nvgpu_mutex_release(&flcn->emem_lock);
 	} else {
 		nvgpu_warn(g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
@@ -284,7 +286,9 @@ int nvgpu_falcon_copy_to_emem(struct nvgpu_falcon *flcn,
 	flcn_dops = &flcn->flcn_engine_dep_ops;
 
 	if (flcn_dops->copy_to_emem != NULL) {
+		nvgpu_mutex_acquire(&flcn->emem_lock);
 		status = flcn_dops->copy_to_emem(g, dst, src, size, port);
+		nvgpu_mutex_release(&flcn->emem_lock);
 	} else {
 		nvgpu_warn(g, "Invalid op on falcon 0x%x ",
 			flcn->flcn_id);
@@ -822,6 +826,17 @@ int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
 		return err;
 	}
 
+	if (flcn->emem_supported) {
+		err = nvgpu_mutex_init(&flcn->emem_lock);
+		if (err != 0) {
+			nvgpu_err(g, "Error in flcn.emem_lock "
+				"mutex initialization");
+			nvgpu_mutex_destroy(&flcn->dmem_lock);
+			nvgpu_mutex_destroy(&flcn->imem_lock);
+			return err;
+		}
+	}
+
 	return 0;
 }
@@ -842,6 +857,9 @@ void nvgpu_falcon_sw_free(struct gk20a *g, u32 flcn_id)
 		return;
 	}
 
+	if (flcn->emem_supported) {
+		nvgpu_mutex_destroy(&flcn->emem_lock);
+	}
 	nvgpu_mutex_destroy(&flcn->dmem_lock);
 	nvgpu_mutex_destroy(&flcn->imem_lock);
 }

@@ -56,6 +56,7 @@ void tu104_falcon_sw_init(struct nvgpu_falcon *flcn)
 		flcn->flcn_base = g->ops.sec2.falcon_base_addr();
 		flcn->is_falcon_supported = true;
 		flcn->is_interrupt_enabled = true;
+		flcn->emem_supported = true;
 		break;
 	case FALCON_ID_NVDEC:
 		flcn->flcn_base = g->ops.nvdec.falcon_base_addr();

@@ -108,8 +108,10 @@ struct nvgpu_falcon {
 	u32 flcn_base;
 	bool is_falcon_supported;
 	bool is_interrupt_enabled;
+	bool emem_supported;
 	struct nvgpu_mutex imem_lock;
 	struct nvgpu_mutex dmem_lock;
+	struct nvgpu_mutex emem_lock;
 	struct nvgpu_falcon_engine_dependency_ops flcn_engine_dep_ops;
 };