gpu: nvgpu: minor fixes in channel_sync.c

This patch is a follow-up to commit
2517d59be2, containing minor fixes:
changing the type of 'pos' to u32 instead of int, and renaming
syncpt_get_id to channel_sync_syncpt_get_id.

Jira NVGPU-1086

Change-Id: I8bd9271c20d88ff5f68ccfc48a0b533844bbcaaa
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1829832
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
ddutta
2018-09-18 12:44:44 +05:30
committed by mobile promotions
parent 7e591dced9
commit feefb7046a
2 changed files with 11 additions and 11 deletions

View File

@@ -49,7 +49,7 @@ struct nvgpu_channel_sync_syncpt {
int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c, int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd, u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd,
u32 wait_cmd_size, int pos, bool preallocated) u32 wait_cmd_size, u32 pos, bool preallocated)
{ {
int err = 0; int err = 0;
bool is_expired = nvgpu_nvhost_syncpt_is_expired_ext( bool is_expired = nvgpu_nvhost_syncpt_is_expired_ext(
@@ -58,7 +58,7 @@ int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
if (is_expired) { if (is_expired) {
if (preallocated) { if (preallocated) {
nvgpu_memset(c->g, wait_cmd->mem, nvgpu_memset(c->g, wait_cmd->mem,
(wait_cmd->off + (u32)pos * wait_cmd_size) * (u32)sizeof(u32), (wait_cmd->off + pos * wait_cmd_size) * (u32)sizeof(u32),
0, wait_cmd_size * (u32)sizeof(u32)); 0, wait_cmd_size * (u32)sizeof(u32));
} }
} else { } else {
@@ -73,7 +73,7 @@ int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx", nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
id, c->vm->syncpt_ro_map_gpu_va); id, c->vm->syncpt_ro_map_gpu_va);
c->g->ops.fifo.add_syncpt_wait_cmd(c->g, wait_cmd, c->g->ops.fifo.add_syncpt_wait_cmd(c->g, wait_cmd,
(u32)pos * wait_cmd_size, id, thresh, pos * wait_cmd_size, id, thresh,
c->vm->syncpt_ro_map_gpu_va); c->vm->syncpt_ro_map_gpu_va);
} }
@@ -257,7 +257,7 @@ static void channel_sync_syncpt_set_safe_state(struct nvgpu_channel_sync *s)
nvgpu_nvhost_syncpt_set_safe_state(sp->nvhost_dev, sp->id); nvgpu_nvhost_syncpt_set_safe_state(sp->nvhost_dev, sp->id);
} }
static int syncpt_get_id(struct nvgpu_channel_sync *s) static int channel_sync_syncpt_get_id(struct nvgpu_channel_sync *s)
{ {
struct nvgpu_channel_sync_syncpt *sp = struct nvgpu_channel_sync_syncpt *sp =
container_of(s, struct nvgpu_channel_sync_syncpt, ops); container_of(s, struct nvgpu_channel_sync_syncpt, ops);
@@ -329,7 +329,7 @@ channel_sync_syncpt_create(struct channel_gk20a *c, bool user_managed)
sp->ops.incr_user = channel_sync_syncpt_incr_user; sp->ops.incr_user = channel_sync_syncpt_incr_user;
sp->ops.set_min_eq_max = channel_sync_syncpt_set_min_eq_max; sp->ops.set_min_eq_max = channel_sync_syncpt_set_min_eq_max;
sp->ops.set_safe_state = channel_sync_syncpt_set_safe_state; sp->ops.set_safe_state = channel_sync_syncpt_set_safe_state;
sp->ops.syncpt_id = syncpt_get_id; sp->ops.syncpt_id = channel_sync_syncpt_get_id;
sp->ops.syncpt_address = channel_sync_syncpt_get_address; sp->ops.syncpt_address = channel_sync_syncpt_get_address;
sp->ops.destroy = channel_sync_syncpt_destroy; sp->ops.destroy = channel_sync_syncpt_destroy;
@@ -390,17 +390,17 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c, void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c,
struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd, struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd,
u32 wait_cmd_size, int pos) u32 wait_cmd_size, u32 pos)
{ {
if (sema == NULL) { if (sema == NULL) {
/* expired */ /* expired */
nvgpu_memset(c->g, wait_cmd->mem, nvgpu_memset(c->g, wait_cmd->mem,
(wait_cmd->off + (u32)pos * wait_cmd_size) * (u32)sizeof(u32), (wait_cmd->off + pos * wait_cmd_size) * (u32)sizeof(u32),
0, wait_cmd_size * (u32)sizeof(u32)); 0, wait_cmd_size * (u32)sizeof(u32));
} else { } else {
WARN_ON(!sema->incremented); WARN_ON(!sema->incremented);
add_sema_cmd(c->g, c, sema, wait_cmd, add_sema_cmd(c->g, c, sema, wait_cmd,
(u32)pos * wait_cmd_size, true, false); pos * wait_cmd_size, true, false);
nvgpu_semaphore_put(sema); nvgpu_semaphore_put(sema);
} }
} }

View File

@@ -98,11 +98,11 @@ struct nvgpu_channel_sync {
void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c, void channel_sync_semaphore_gen_wait_cmd(struct channel_gk20a *c,
struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd, struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd,
u32 wait_cmd_size, int pos); u32 wait_cmd_size, u32 pos);
int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c, int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd, u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd,
u32 wait_cmd_size, int pos, bool preallocated); u32 wait_cmd_size, u32 pos, bool preallocated);
void nvgpu_channel_sync_destroy(struct nvgpu_channel_sync *sync, void nvgpu_channel_sync_destroy(struct nvgpu_channel_sync *sync,
bool set_safe_state); bool set_safe_state);
@@ -110,4 +110,4 @@ struct nvgpu_channel_sync *nvgpu_channel_sync_create(struct channel_gk20a *c,
bool user_managed); bool user_managed);
bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g); bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g);
#endif /* NVGPU_GK20A_CHANNEL_SYNC_GK20A_H */ #endif /* NVGPU_CHANNEL_SYNC_H */