Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: split sync HAL into syncpt and sema
Split sync HAL into sync.syncpt and sync.sema.

Jira NVGPU-1984
Jira NVGPU-1986

Change-Id: I66bd6948e1d77b7728a667de3d3b1ae2adc62e27
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2096373
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 656a9aa170
parent c0cf011600
committed by: mobile promotions
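In outline, the change moves the flat sync HAL entries into two nested sub-structs, sync.syncpt and sync.sema, and call sites reach the backend through the corresponding sub-struct. The sketch below condenses the new layout from the struct gpu_ops hunk further down in this diff; the typedef, the gpu_ops_sync_sketch name, the choose_wait_cmd_size() helper, and the use_syncpt flag are illustrative only and are not part of the change.

#include <stdbool.h>

typedef unsigned int u32;	/* stand-in for the kernel's u32 */

/* Condensed sketch of the split HAL: only the size getters are shown;
 * the buffer-management and command-emission hooks are elided. */
struct gpu_ops_sync_sketch {
	struct {
		u32 (*get_syncpt_wait_cmd_size)(void);
		u32 (*get_syncpt_incr_cmd_size)(bool wfi_cmd);
		u32 (*get_syncpt_incr_per_release)(void);
		/* alloc/free_syncpt_buf, add_syncpt_*_cmd, get_sync_ro_map ... */
	} syncpt;
	struct {
		u32 (*get_sema_wait_cmd_size)(void);
		u32 (*get_sema_incr_cmd_size)(void);
		/* add_sema_cmd ... */
	} sema;
};

/* Call sites now pick the backend through the sub-struct, e.g.: */
static u32 choose_wait_cmd_size(const struct gpu_ops_sync_sketch *sync,
				bool use_syncpt)
{
	return use_syncpt ?
		sync->syncpt.get_syncpt_wait_cmd_size() :
		sync->sema.get_sema_wait_cmd_size();
}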
@@ -81,7 +81,7 @@ static void add_sema_cmd(struct gk20a *g, struct channel_gk20a *c,
         nvgpu_semaphore_prepare(s, c->hw_sema);
     }

-    g->ops.sync.add_sema_cmd(g, s, va, cmd, off, acquire, wfi);
+    g->ops.sync.sema.add_sema_cmd(g, s, va, cmd, off, acquire, wfi);

     if (acquire) {
         gpu_sema_verbose_dbg(g, "(A) c=%d ACQ_GE %-4u pool=%-3llu"
@@ -154,7 +154,7 @@ static int channel_sync_semaphore_wait_fd(
         goto cleanup;
     }

-    wait_cmd_size = c->g->ops.sync.get_sema_wait_cmd_size();
+    wait_cmd_size = c->g->ops.sync.sema.get_sema_wait_cmd_size();
     err = gk20a_channel_alloc_priv_cmdbuf(c,
         wait_cmd_size * num_fences, entry);
     if (err != 0) {
@@ -195,7 +195,7 @@ static int channel_sync_semaphore_incr_common(
         return -ENOMEM;
     }

-    incr_cmd_size = c->g->ops.sync.get_sema_incr_cmd_size();
+    incr_cmd_size = c->g->ops.sync.sema.get_sema_incr_cmd_size();
     err = gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, incr_cmd);
     if (err != 0) {
         nvgpu_err(c->g,
@@ -72,7 +72,7 @@ static int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
     } else {
         if (!preallocated) {
             err = gk20a_channel_alloc_priv_cmdbuf(c,
-                c->g->ops.sync.get_syncpt_wait_cmd_size(),
+                c->g->ops.sync.syncpt.get_syncpt_wait_cmd_size(),
                 wait_cmd);
             if (err != 0) {
                 nvgpu_err(c->g, "not enough priv cmd buffer space");
@@ -81,7 +81,7 @@ static int channel_sync_syncpt_gen_wait_cmd(struct channel_gk20a *c,
         }
         nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
             id, c->vm->syncpt_ro_map_gpu_va);
-        c->g->ops.sync.add_syncpt_wait_cmd(c->g, wait_cmd,
+        c->g->ops.sync.syncpt.add_syncpt_wait_cmd(c->g, wait_cmd,
             pos * wait_cmd_size, id, thresh,
             c->vm->syncpt_ro_map_gpu_va);
     }
@@ -94,7 +94,7 @@ static int channel_sync_syncpt_wait_raw(struct nvgpu_channel_sync_syncpt *s,
 {
     struct channel_gk20a *c = s->c;
     int err = 0;
-    u32 wait_cmd_size = c->g->ops.sync.get_syncpt_wait_cmd_size();
+    u32 wait_cmd_size = c->g->ops.sync.syncpt.get_syncpt_wait_cmd_size();

     if (!nvgpu_nvhost_syncpt_is_valid_pt_ext(s->nvhost_dev, id)) {
         return -EINVAL;
@@ -150,7 +150,7 @@ static int channel_sync_syncpt_wait_fd(struct nvgpu_channel_sync *s, int fd,
         }
     }

-    wait_cmd_size = c->g->ops.sync.get_syncpt_wait_cmd_size();
+    wait_cmd_size = c->g->ops.sync.syncpt.get_syncpt_wait_cmd_size();
     err = gk20a_channel_alloc_priv_cmdbuf(c,
         wait_cmd_size * num_fences, wait_cmd);
     if (err != 0) {
@@ -196,7 +196,7 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
     struct nvgpu_os_fence os_fence = {0};

     err = gk20a_channel_alloc_priv_cmdbuf(c,
-        c->g->ops.sync.get_syncpt_incr_cmd_size(wfi_cmd),
+        c->g->ops.sync.syncpt.get_syncpt_incr_cmd_size(wfi_cmd),
         incr_cmd);
     if (err != 0) {
         return err;
@@ -204,11 +204,11 @@ static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,

     nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
         sp->id, sp->syncpt_buf.gpu_va);
-    c->g->ops.sync.add_syncpt_incr_cmd(c->g, wfi_cmd,
+    c->g->ops.sync.syncpt.add_syncpt_incr_cmd(c->g, wfi_cmd,
         incr_cmd, sp->id, sp->syncpt_buf.gpu_va);

     thresh = nvgpu_nvhost_syncpt_incr_max_ext(sp->nvhost_dev, sp->id,
-        c->g->ops.sync.get_syncpt_incr_per_release());
+        c->g->ops.sync.syncpt.get_syncpt_incr_per_release());

     if (register_irq) {
         struct channel_gk20a *referenced = gk20a_channel_get(c);
@@ -323,7 +323,7 @@ static void channel_sync_syncpt_destroy(struct nvgpu_channel_sync *s)
         nvgpu_channel_sync_syncpt_from_ops(s);


-    sp->c->g->ops.sync.free_syncpt_buf(sp->c, &sp->syncpt_buf);
+    sp->c->g->ops.sync.syncpt.free_syncpt_buf(sp->c, &sp->syncpt_buf);

     nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id);
     nvgpu_nvhost_syncpt_put_ref_ext(sp->nvhost_dev, sp->id);
@@ -391,7 +391,7 @@ nvgpu_channel_sync_syncpt_create(struct channel_gk20a *c, bool user_managed)
         return NULL;
     }

-    sp->c->g->ops.sync.alloc_syncpt_buf(sp->c, sp->id,
+    sp->c->g->ops.sync.syncpt.alloc_syncpt_buf(sp->c, sp->id,
         &sp->syncpt_buf);

     nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id);
@@ -462,20 +462,26 @@ static const struct gpu_ops vgpu_gp10b_ops = {
     },
     .sync = {
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
+        .syncpt = {
             .alloc_syncpt_buf = gk20a_alloc_syncpt_buf,
             .free_syncpt_buf = gk20a_free_syncpt_buf,
             .add_syncpt_wait_cmd = gk20a_add_syncpt_wait_cmd,
-            .get_syncpt_wait_cmd_size = gk20a_get_syncpt_wait_cmd_size,
+            .get_syncpt_wait_cmd_size =
+                gk20a_get_syncpt_wait_cmd_size,
             .get_syncpt_incr_per_release =
                 gk20a_get_syncpt_incr_per_release,
             .add_syncpt_incr_cmd = gk20a_add_syncpt_incr_cmd,
-            .get_syncpt_incr_cmd_size = gk20a_get_syncpt_incr_cmd_size,
+            .get_syncpt_incr_cmd_size =
+                gk20a_get_syncpt_incr_cmd_size,
             .get_sync_ro_map = NULL,
+        },
 #endif
+        .sema = {
             .get_sema_wait_cmd_size = gk20a_get_sema_wait_cmd_size,
             .get_sema_incr_cmd_size = gk20a_get_sema_incr_cmd_size,
             .add_sema_cmd = gk20a_add_sema_cmd,
         },
+    },
     .engine_status = {
         .read_engine_status_info = NULL,
         .dump_engine_status = NULL,
@@ -549,20 +549,26 @@ static const struct gpu_ops vgpu_gv11b_ops = {
     },
     .sync = {
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
+        .syncpt = {
             .alloc_syncpt_buf = vgpu_gv11b_fifo_alloc_syncpt_buf,
             .free_syncpt_buf = vgpu_gv11b_fifo_free_syncpt_buf,
             .add_syncpt_wait_cmd = gv11b_add_syncpt_wait_cmd,
-            .get_syncpt_wait_cmd_size = gv11b_get_syncpt_wait_cmd_size,
+            .get_syncpt_wait_cmd_size =
+                gv11b_get_syncpt_wait_cmd_size,
             .get_syncpt_incr_per_release =
                 gv11b_get_syncpt_incr_per_release,
             .add_syncpt_incr_cmd = gv11b_add_syncpt_incr_cmd,
-            .get_syncpt_incr_cmd_size = gv11b_get_syncpt_incr_cmd_size,
+            .get_syncpt_incr_cmd_size =
+                gv11b_get_syncpt_incr_cmd_size,
             .get_sync_ro_map = vgpu_gv11b_fifo_get_sync_ro_map,
+        },
 #endif
+        .sema = {
             .get_sema_wait_cmd_size = gv11b_get_sema_wait_cmd_size,
             .get_sema_incr_cmd_size = gv11b_get_sema_incr_cmd_size,
             .add_sema_cmd = gv11b_add_sema_cmd,
         },
+    },
     .engine_status = {
         .read_engine_status_info = NULL,
         .dump_engine_status = NULL,
@@ -696,20 +696,26 @@ static const struct gpu_ops gm20b_ops = {
     },
     .sync = {
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
+        .syncpt = {
             .alloc_syncpt_buf = gk20a_alloc_syncpt_buf,
             .free_syncpt_buf = gk20a_free_syncpt_buf,
             .add_syncpt_wait_cmd = gk20a_add_syncpt_wait_cmd,
             .get_syncpt_incr_per_release =
                 gk20a_get_syncpt_incr_per_release,
-            .get_syncpt_wait_cmd_size = gk20a_get_syncpt_wait_cmd_size,
+            .get_syncpt_wait_cmd_size =
+                gk20a_get_syncpt_wait_cmd_size,
             .add_syncpt_incr_cmd = gk20a_add_syncpt_incr_cmd,
-            .get_syncpt_incr_cmd_size = gk20a_get_syncpt_incr_cmd_size,
+            .get_syncpt_incr_cmd_size =
+                gk20a_get_syncpt_incr_cmd_size,
             .get_sync_ro_map = NULL,
+        },
 #endif
+        .sema = {
             .get_sema_wait_cmd_size = gk20a_get_sema_wait_cmd_size,
             .get_sema_incr_cmd_size = gk20a_get_sema_incr_cmd_size,
             .add_sema_cmd = gk20a_add_sema_cmd,
         },
+    },
     .engine_status = {
         .read_engine_status_info =
             gm20b_read_engine_status_info,
@@ -793,20 +793,26 @@ static const struct gpu_ops gp10b_ops = {
     },
     .sync = {
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
+        .syncpt = {
             .alloc_syncpt_buf = gk20a_alloc_syncpt_buf,
             .free_syncpt_buf = gk20a_free_syncpt_buf,
             .add_syncpt_wait_cmd = gk20a_add_syncpt_wait_cmd,
             .get_syncpt_incr_per_release =
                 gk20a_get_syncpt_incr_per_release,
-            .get_syncpt_wait_cmd_size = gk20a_get_syncpt_wait_cmd_size,
+            .get_syncpt_wait_cmd_size =
+                gk20a_get_syncpt_wait_cmd_size,
             .add_syncpt_incr_cmd = gk20a_add_syncpt_incr_cmd,
-            .get_syncpt_incr_cmd_size = gk20a_get_syncpt_incr_cmd_size,
+            .get_syncpt_incr_cmd_size =
+                gk20a_get_syncpt_incr_cmd_size,
             .get_sync_ro_map = NULL,
+        },
 #endif
+        .sema = {
             .get_sema_wait_cmd_size = gk20a_get_sema_wait_cmd_size,
             .get_sema_incr_cmd_size = gk20a_get_sema_incr_cmd_size,
             .add_sema_cmd = gk20a_add_sema_cmd,
         },
+    },
     .engine_status = {
         .read_engine_status_info =
             gm20b_read_engine_status_info,
@@ -966,20 +966,26 @@ static const struct gpu_ops gv100_ops = {
     },
     .sync = {
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
+        .syncpt = {
             .alloc_syncpt_buf = gv11b_alloc_syncpt_buf,
             .free_syncpt_buf = gv11b_free_syncpt_buf,
             .add_syncpt_wait_cmd = gv11b_add_syncpt_wait_cmd,
-            .get_syncpt_wait_cmd_size = gv11b_get_syncpt_wait_cmd_size,
+            .get_syncpt_wait_cmd_size =
+                gv11b_get_syncpt_wait_cmd_size,
             .add_syncpt_incr_cmd = gv11b_add_syncpt_incr_cmd,
-            .get_syncpt_incr_cmd_size = gv11b_get_syncpt_incr_cmd_size,
+            .get_syncpt_incr_cmd_size =
+                gv11b_get_syncpt_incr_cmd_size,
             .get_syncpt_incr_per_release =
                 gv11b_get_syncpt_incr_per_release,
             .get_sync_ro_map = gv11b_get_sync_ro_map,
+        },
 #endif
+        .sema = {
             .get_sema_wait_cmd_size = gv11b_get_sema_wait_cmd_size,
             .get_sema_incr_cmd_size = gv11b_get_sema_incr_cmd_size,
             .add_sema_cmd = gv11b_add_sema_cmd,
         },
+    },
     .engine_status = {
         .read_engine_status_info =
             gv100_read_engine_status_info,
@@ -940,20 +940,26 @@ static const struct gpu_ops gv11b_ops = {
     },
     .sync = {
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
+        .syncpt = {
             .alloc_syncpt_buf = gv11b_alloc_syncpt_buf,
             .free_syncpt_buf = gv11b_free_syncpt_buf,
             .add_syncpt_wait_cmd = gv11b_add_syncpt_wait_cmd,
-            .get_syncpt_wait_cmd_size = gv11b_get_syncpt_wait_cmd_size,
+            .get_syncpt_wait_cmd_size =
+                gv11b_get_syncpt_wait_cmd_size,
             .add_syncpt_incr_cmd = gv11b_add_syncpt_incr_cmd,
-            .get_syncpt_incr_cmd_size = gv11b_get_syncpt_incr_cmd_size,
+            .get_syncpt_incr_cmd_size =
+                gv11b_get_syncpt_incr_cmd_size,
             .get_syncpt_incr_per_release =
                 gv11b_get_syncpt_incr_per_release,
             .get_sync_ro_map = gv11b_get_sync_ro_map,
+        },
 #endif
+        .sema = {
             .get_sema_wait_cmd_size = gv11b_get_sema_wait_cmd_size,
             .get_sema_incr_cmd_size = gv11b_get_sema_incr_cmd_size,
             .add_sema_cmd = gv11b_add_sema_cmd,
         },
+    },
     .engine_status = {
         .read_engine_status_info =
             gv100_read_engine_status_info,
@@ -1118,8 +1118,10 @@ struct gpu_ops {

     struct {
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
+        struct {
             int (*alloc_syncpt_buf)(struct channel_gk20a *c,
-                u32 syncpt_id, struct nvgpu_mem *syncpt_buf);
+                u32 syncpt_id,
+                struct nvgpu_mem *syncpt_buf);
             void (*free_syncpt_buf)(struct channel_gk20a *c,
                 struct nvgpu_mem *syncpt_buf);
             void (*add_syncpt_wait_cmd)(struct gk20a *g,
@@ -1127,19 +1129,23 @@ struct gpu_ops {
                 u32 id, u32 thresh, u64 gpu_va);
             u32 (*get_syncpt_wait_cmd_size)(void);
             void (*add_syncpt_incr_cmd)(struct gk20a *g,
-                bool wfi_cmd, struct priv_cmd_entry *cmd,
+                bool wfi_cmd,
+                struct priv_cmd_entry *cmd,
                 u32 id, u64 gpu_va);
             u32 (*get_syncpt_incr_cmd_size)(bool wfi_cmd);
             int (*get_sync_ro_map)(struct vm_gk20a *vm,
                 u64 *base_gpuva, u32 *sync_size);
             u32 (*get_syncpt_incr_per_release)(void);
+        } syncpt;
 #endif
+        struct {
             u32 (*get_sema_wait_cmd_size)(void);
             u32 (*get_sema_incr_cmd_size)(void);
             void (*add_sema_cmd)(struct gk20a *g,
                 struct nvgpu_semaphore *s, u64 sema_va,
                 struct priv_cmd_entry *cmd,
                 u32 off, bool acquire, bool wfi);
+        } sema;
     } sync;
     struct {
         int (*alloc_inst)(struct gk20a *g, struct channel_gk20a *ch);
@@ -279,13 +279,13 @@ static int nvgpu_as_ioctl_get_sync_ro_map(
     u32 sync_size;
     int err = 0;

-    if (g->ops.sync.get_sync_ro_map == NULL)
+    if (g->ops.sync.syncpt.get_sync_ro_map == NULL)
         return -EINVAL;

     if (!nvgpu_has_syncpoints(g))
         return -EINVAL;

-    err = g->ops.sync.get_sync_ro_map(vm, &base_gpuva, &sync_size);
+    err = g->ops.sync.syncpt.get_sync_ro_map(vm, &base_gpuva, &sync_size);
     if (err)
         return err;

@@ -1003,20 +1003,26 @@ static const struct gpu_ops tu104_ops = {
     },
     .sync = {
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
+        .syncpt = {
             .alloc_syncpt_buf = gv11b_alloc_syncpt_buf,
             .free_syncpt_buf = gv11b_free_syncpt_buf,
             .add_syncpt_wait_cmd = gv11b_add_syncpt_wait_cmd,
-            .get_syncpt_wait_cmd_size = gv11b_get_syncpt_wait_cmd_size,
+            .get_syncpt_wait_cmd_size =
+                gv11b_get_syncpt_wait_cmd_size,
             .add_syncpt_incr_cmd = gv11b_add_syncpt_incr_cmd,
-            .get_syncpt_incr_cmd_size = gv11b_get_syncpt_incr_cmd_size,
+            .get_syncpt_incr_cmd_size =
+                gv11b_get_syncpt_incr_cmd_size,
             .get_syncpt_incr_per_release =
                 gv11b_get_syncpt_incr_per_release,
             .get_sync_ro_map = gv11b_get_sync_ro_map,
+        },
 #endif
+        .sema = {
             .get_sema_wait_cmd_size = gv11b_get_sema_wait_cmd_size,
             .get_sema_incr_cmd_size = gv11b_get_sema_incr_cmd_size,
             .add_sema_cmd = gv11b_add_sema_cmd,
         },
+    },
     .engine_status = {
         .read_engine_status_info =
             gv100_read_engine_status_info,