gpu: nvgpu: move falcon mem copy locking to common

Falcon copy_lock mutex operations are HAL-independent. Move them to the common falcon.c.

JIRA NVGPU-1459

Change-Id: I6ff90eb7c96d495c317fcf0313aa2934d1fc0d8c
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2015588
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Sagar Kamble
Date: 2019-02-06 14:36:05 +05:30
Committed by: mobile promotions
Parent: c2a1cc5ff8
Commit: f2fc0c2ba8
5 changed files with 25 additions and 43 deletions
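
The change follows one pattern throughout: the chip-agnostic wrappers in falcon.c take flcn->copy_lock around the HAL callback, and the per-chip copy routines stop managing the mutex themselves. A rough sketch of the resulting common wrapper, condensed from the hunks below (the parameter types, the -EINVAL default and the direct flcn_ops access are assumptions here, and the usual nvgpu headers are taken as given):

int nvgpu_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
        u32 src, u8 *dst, u32 size, u8 port)
{
        int status = -EINVAL;

        if (flcn->flcn_ops.copy_from_dmem != NULL) {
                /* Serialization now lives in common code, not in each HAL. */
                nvgpu_mutex_acquire(&flcn->copy_lock);
                status = flcn->flcn_ops.copy_from_dmem(flcn, src, dst,
                                size, port);
                nvgpu_mutex_release(&flcn->copy_lock);
        } else {
                nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
                        flcn->flcn_id);
        }

        return status;
}

The same acquire/op/release shape repeats for copy_to_dmem, copy_from_imem and copy_to_imem, which is why the per-chip routines in the later files can simply drop their locking.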


@@ -280,7 +280,9 @@ int nvgpu_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
         flcn_ops = &flcn->flcn_ops;
 
         if (flcn_ops->copy_from_dmem != NULL) {
+                nvgpu_mutex_acquire(&flcn->copy_lock);
                 status = flcn_ops->copy_from_dmem(flcn, src, dst, size, port);
+                nvgpu_mutex_release(&flcn->copy_lock);
         } else {
                 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
                         flcn->flcn_id);
@@ -302,7 +304,9 @@ int nvgpu_falcon_copy_to_dmem(struct nvgpu_falcon *flcn,
         flcn_ops = &flcn->flcn_ops;
 
         if (flcn_ops->copy_to_dmem != NULL) {
+                nvgpu_mutex_acquire(&flcn->copy_lock);
                 status = flcn_ops->copy_to_dmem(flcn, dst, src, size, port);
+                nvgpu_mutex_release(&flcn->copy_lock);
         } else {
                 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
                         flcn->flcn_id);
@@ -324,7 +328,9 @@ int nvgpu_falcon_copy_from_imem(struct nvgpu_falcon *flcn,
         flcn_ops = &flcn->flcn_ops;
 
         if (flcn_ops->copy_from_imem != NULL) {
+                nvgpu_mutex_acquire(&flcn->copy_lock);
                 status = flcn_ops->copy_from_imem(flcn, src, dst, size, port);
+                nvgpu_mutex_release(&flcn->copy_lock);
         } else {
                 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
                         flcn->flcn_id);
@@ -346,8 +352,10 @@ int nvgpu_falcon_copy_to_imem(struct nvgpu_falcon *flcn,
         flcn_ops = &flcn->flcn_ops;
 
         if (flcn_ops->copy_to_imem != NULL) {
+                nvgpu_mutex_acquire(&flcn->copy_lock);
                 status = flcn_ops->copy_to_imem(flcn, dst, src, size, port,
                                 sec, tag);
+                nvgpu_mutex_release(&flcn->copy_lock);
         } else {
                 nvgpu_warn(flcn->g, "Invalid op on falcon 0x%x ",
                         flcn->flcn_id);
@@ -609,6 +617,7 @@ int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
 {
         struct nvgpu_falcon **flcn_p = NULL, *flcn = NULL;
         struct gpu_ops *gops = &g->ops;
+        int err;
 
         flcn_p = falcon_get_instance(g, flcn_id);
         if (flcn_p == NULL) {
@@ -622,6 +631,13 @@ int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
                 return -ENOMEM;
         }
 
+        err = nvgpu_mutex_init(&flcn->copy_lock);
+        if (err != 0) {
+                nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
+                nvgpu_kfree(g, flcn);
+                return err;
+        }
+
         flcn->flcn_id = flcn_id;
         flcn->g = g;
 
@@ -633,7 +649,7 @@ int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
 
 void nvgpu_falcon_sw_free(struct gk20a *g, u32 flcn_id)
 {
-        struct nvgpu_falcon **flcn_p = NULL;
+        struct nvgpu_falcon **flcn_p = NULL, *flcn = NULL;
         struct gpu_ops *gops = &g->ops;
 
         flcn_p = falcon_get_instance(g, flcn_id);
@@ -641,7 +657,9 @@ void nvgpu_falcon_sw_free(struct gk20a *g, u32 flcn_id)
                 return;
         }
 
-        gops->falcon.falcon_hal_sw_free(*flcn_p);
-        nvgpu_kfree(g, *flcn_p);
+        flcn = *flcn_p;
+        gops->falcon.falcon_hal_sw_free(flcn);
+        nvgpu_mutex_destroy(&flcn->copy_lock);
+        nvgpu_kfree(g, flcn);
         *flcn_p = NULL;
 }


@@ -207,8 +207,6 @@ static int gk20a_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
                 return -EINVAL;
         }
 
-        nvgpu_mutex_acquire(&flcn->copy_lock);
-
         words = size >> 2;
         bytes = size & 0x3U;
 
@@ -232,7 +230,6 @@ static int gk20a_falcon_copy_from_dmem(struct nvgpu_falcon *flcn,
                 }
         }
 
-        nvgpu_mutex_release(&flcn->copy_lock);
         return 0;
 }
 
@@ -252,8 +249,6 @@ static int gk20a_falcon_copy_to_dmem(struct nvgpu_falcon *flcn,
                 return -EINVAL;
         }
 
-        nvgpu_mutex_acquire(&flcn->copy_lock);
-
         words = size >> 2;
         bytes = size & 0x3U;
 
@@ -286,8 +281,6 @@ static int gk20a_falcon_copy_to_dmem(struct nvgpu_falcon *flcn,
                         data - dst, size);
         }
 
-        nvgpu_mutex_release(&flcn->copy_lock);
-
         return 0;
 }
 
@@ -310,8 +303,6 @@ static int gk20a_falcon_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
                 return -EINVAL;
         }
 
-        nvgpu_mutex_acquire(&flcn->copy_lock);
-
         words = size >> 2;
         bytes = size & 0x3U;
         blk = src >> 8;
@@ -336,8 +327,6 @@ static int gk20a_falcon_copy_from_imem(struct nvgpu_falcon *flcn, u32 src,
                 }
         }
 
-        nvgpu_mutex_release(&flcn->copy_lock);
-
         return 0;
 }
 
@@ -358,8 +347,6 @@ static int gk20a_falcon_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
                 return -EINVAL;
         }
 
-        nvgpu_mutex_acquire(&flcn->copy_lock);
-
         words = size >> 2;
         blk = dst >> 8;
 
@@ -391,8 +378,6 @@ static int gk20a_falcon_copy_to_imem(struct nvgpu_falcon *flcn, u32 dst,
                 i++;
         }
 
-        nvgpu_mutex_release(&flcn->copy_lock);
-
         return 0;
 }
 
@@ -753,12 +738,7 @@ int gk20a_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
         }
 
         if (flcn->is_falcon_supported) {
-                err = nvgpu_mutex_init(&flcn->copy_lock);
-                if (err != 0) {
-                        nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
-                } else {
-                        gk20a_falcon_ops(flcn);
-                }
+                gk20a_falcon_ops(flcn);
         } else {
                 nvgpu_log_info(g, "falcon 0x%x not supported on %s",
                         flcn->flcn_id, g->name);
@@ -772,7 +752,6 @@ void gk20a_falcon_hal_sw_free(struct nvgpu_falcon *flcn)
         struct gk20a *g = flcn->g;
 
         if (flcn->is_falcon_supported) {
-                nvgpu_mutex_destroy(&flcn->copy_lock);
                 flcn->is_falcon_supported = false;
         } else {
                 nvgpu_log_info(g, "falcon 0x%x not supported on %s",


@@ -90,12 +90,7 @@ int gp106_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
         }
 
         if (flcn->is_falcon_supported) {
-                err = nvgpu_mutex_init(&flcn->copy_lock);
-                if (err != 0) {
-                        nvgpu_err(g, "Error in copy_lock mutex initialization");
-                } else {
-                        gp106_falcon_ops(flcn);
-                }
+                gp106_falcon_ops(flcn);
         } else {
                 nvgpu_info(g, "falcon 0x%x not supported on %s",
                         flcn->flcn_id, g->name);


@@ -70,12 +70,7 @@ int gv100_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
         }
 
         if (flcn->is_falcon_supported) {
-                err = nvgpu_mutex_init(&flcn->copy_lock);
-                if (err != 0) {
-                        nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
-                } else {
-                        gv100_falcon_ops(flcn);
-                }
+                gv100_falcon_ops(flcn);
         } else {
                 /*
                  * Forward call to previous chips HAL


@@ -79,12 +79,7 @@ int tu104_falcon_hal_sw_init(struct nvgpu_falcon *flcn)
         }
 
         if (flcn->is_falcon_supported) {
-                err = nvgpu_mutex_init(&flcn->copy_lock);
-                if (err != 0) {
-                        nvgpu_err(g, "Error in flcn.copy_lock mutex initialization");
-                } else {
-                        tu104_falcon_ops(flcn);
-                }
+                tu104_falcon_ops(flcn);
         } else {
                 /*
                  * Forward call to previous chips HAL