gpu: nvgpu: move open/release from fifo to tsg

Moved the following HALs from fifo to tsg:
- tsg.open
- tsg.release

They are used only in the vgpu case.

Jira NVGPU-2979

Change-Id: Icda6f6cfea063ea326b1874b7f15b57a8ea0d5b9
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2087184
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Thomas Fleury
2019-04-01 09:39:25 -07:00
committed by mobile promotions
parent 75963b47f1
commit bf5ed9fd9f
8 changed files with 28 additions and 28 deletions

View File

@@ -136,7 +136,7 @@ int nvgpu_tsg_unbind_channel(struct tsg_gk20a *tsg, struct channel_gk20a *ch)
err = g->ops.tsg.unbind_channel(tsg, ch);
}
nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
nvgpu_ref_put(&tsg->refcount, nvgpu_tsg_release);
return 0;
}
@@ -652,7 +652,7 @@ static struct tsg_gk20a *gk20a_tsg_acquire_unused_tsg(struct fifo_gk20a *f)
return tsg;
}
int gk20a_tsg_open_common(struct gk20a *g, struct tsg_gk20a *tsg, pid_t pid)
int nvgpu_tsg_open_common(struct gk20a *g, struct tsg_gk20a *tsg, pid_t pid)
{
u32 no_of_sm = nvgpu_gr_config_get_no_of_sm(g->gr.config);
int err;
@@ -692,8 +692,8 @@ int gk20a_tsg_open_common(struct gk20a *g, struct tsg_gk20a *tsg, pid_t pid)
g->ops.fifo.init_eng_method_buffers(g, tsg);
}
if (g->ops.fifo.tsg_open != NULL) {
err = g->ops.fifo.tsg_open(tsg);
if (g->ops.tsg.open != NULL) {
err = g->ops.tsg.open(tsg);
if (err != 0) {
nvgpu_err(g, "tsg %d fifo open failed %d",
tsg->tsgid, err);
@@ -704,13 +704,13 @@ int gk20a_tsg_open_common(struct gk20a *g, struct tsg_gk20a *tsg, pid_t pid)
return 0;
clean_up:
gk20a_tsg_release_common(g, tsg);
nvgpu_tsg_release_common(g, tsg);
nvgpu_ref_put(&tsg->refcount, NULL);
return err;
}
struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
struct tsg_gk20a *nvgpu_tsg_open(struct gk20a *g, pid_t pid)
{
struct tsg_gk20a *tsg;
int err;
@@ -720,7 +720,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
return NULL;
}
err = gk20a_tsg_open_common(g, tsg, pid);
err = nvgpu_tsg_open_common(g, tsg, pid);
if (err != 0) {
release_used_tsg(&g->fifo, tsg);
nvgpu_err(g, "tsg %d open failed %d", tsg->tsgid, err);
@@ -732,10 +732,10 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
return tsg;
}
void gk20a_tsg_release_common(struct gk20a *g, struct tsg_gk20a *tsg)
void nvgpu_tsg_release_common(struct gk20a *g, struct tsg_gk20a *tsg)
{
if (g->ops.fifo.tsg_release != NULL) {
g->ops.fifo.tsg_release(tsg);
if (g->ops.tsg.release != NULL) {
g->ops.tsg.release(tsg);
}
nvgpu_kfree(g, tsg->gr_ctx);
@@ -763,7 +763,7 @@ static struct tsg_gk20a *tsg_gk20a_from_ref(struct nvgpu_ref *ref)
((uintptr_t)ref - offsetof(struct tsg_gk20a, refcount));
}
void gk20a_tsg_release(struct nvgpu_ref *ref)
void nvgpu_tsg_release(struct nvgpu_ref *ref)
{
struct tsg_gk20a *tsg = tsg_gk20a_from_ref(ref);
struct gk20a *g = tsg->g;
@@ -784,7 +784,7 @@ void gk20a_tsg_release(struct nvgpu_ref *ref)
}
nvgpu_mutex_release(&tsg->event_id_list_lock);
gk20a_tsg_release_common(g, tsg);
nvgpu_tsg_release_common(g, tsg);
release_used_tsg(&g->fifo, tsg);
nvgpu_log(g, gpu_dbg_fn, "tsg released %d\n", tsg->tsgid);

View File

@@ -417,8 +417,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.preempt_channel = vgpu_fifo_preempt_channel,
.preempt_tsg = vgpu_fifo_preempt_tsg,
.tsg_set_timeslice = vgpu_tsg_set_timeslice,
.tsg_open = vgpu_tsg_open,
.tsg_release = vgpu_tsg_release,
.dump_channel_status_ramfc = NULL,
.is_preempt_pending = NULL,
.reset_enable_hw = NULL,
@@ -540,6 +538,8 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.set_error_notifier = nvgpu_set_error_notifier,
},
.tsg = {
.open = vgpu_tsg_open,
.release = vgpu_tsg_release,
.enable = vgpu_tsg_enable,
.disable = nvgpu_tsg_disable,
.bind_channel = vgpu_tsg_bind_channel,

View File

@@ -500,8 +500,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.preempt_channel = vgpu_fifo_preempt_channel,
.preempt_tsg = vgpu_fifo_preempt_tsg,
.tsg_set_timeslice = vgpu_tsg_set_timeslice,
.tsg_open = vgpu_tsg_open,
.tsg_release = vgpu_tsg_release,
.dump_channel_status_ramfc = NULL,
.is_preempt_pending = gv11b_fifo_is_preempt_pending,
.reset_enable_hw = NULL,
@@ -628,6 +626,8 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.set_error_notifier = nvgpu_set_error_notifier,
},
.tsg = {
.open = vgpu_tsg_open,
.release = vgpu_tsg_release,
.enable = gv11b_tsg_enable,
.disable = nvgpu_tsg_disable,
.bind_channel = vgpu_gv11b_tsg_bind_channel,

View File

@@ -142,7 +142,7 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
* gk20a_channel_close() will also unbind the channel from TSG
*/
gk20a_channel_close(ce_ctx->ch);
nvgpu_ref_put(&ce_ctx->tsg->refcount, gk20a_tsg_release);
nvgpu_ref_put(&ce_ctx->tsg->refcount, nvgpu_tsg_release);
/* housekeeping on app */
if ((list->prev != NULL) && (list->next != NULL)) {
@@ -467,7 +467,7 @@ u32 gk20a_ce_create_context(struct gk20a *g,
ce_ctx->vm = g->mm.ce.vm;
/* allocate a tsg if needed */
ce_ctx->tsg = gk20a_tsg_open(g, nvgpu_current_pid(g));
ce_ctx->tsg = nvgpu_tsg_open(g, nvgpu_current_pid(g));
if (ce_ctx->tsg == NULL) {
nvgpu_err(g, "ce: gk20a tsg not available");
err = -ENOMEM;

View File

@@ -955,8 +955,6 @@ struct gpu_ops {
void (*apply_pb_timeout)(struct gk20a *g);
int (*tsg_set_timeslice)(struct tsg_gk20a *tsg, u32 timeslice);
u32 (*default_timeslice_us)(struct gk20a *g);
int (*tsg_open)(struct tsg_gk20a *tsg);
void (*tsg_release)(struct tsg_gk20a *tsg);
int (*init_pbdma_info)(struct fifo_gk20a *f);
int (*init_engine_info)(struct fifo_gk20a *f);
u32 (*get_engines_mask_on_id)(struct gk20a *g,
@@ -1148,6 +1146,8 @@ struct gpu_ops {
int (*set_syncpt)(struct channel_gk20a *ch);
} channel;
struct {
int (*open)(struct tsg_gk20a *tsg);
void (*release)(struct tsg_gk20a *tsg);
void (*enable)(struct tsg_gk20a *tsg);
void (*disable)(struct tsg_gk20a *tsg);
int (*bind_channel)(struct tsg_gk20a *tsg,

View File

@@ -82,10 +82,10 @@ struct tsg_gk20a {
struct nvgpu_mutex sm_exception_mask_lock;
};
int gk20a_tsg_open_common(struct gk20a *g, struct tsg_gk20a *tsg, pid_t pid);
struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid);
void gk20a_tsg_release_common(struct gk20a *g, struct tsg_gk20a *tsg);
void gk20a_tsg_release(struct nvgpu_ref *ref);
int nvgpu_tsg_open_common(struct gk20a *g, struct tsg_gk20a *tsg, pid_t pid);
struct tsg_gk20a *nvgpu_tsg_open(struct gk20a *g, pid_t pid);
void nvgpu_tsg_release_common(struct gk20a *g, struct tsg_gk20a *tsg);
void nvgpu_tsg_release(struct nvgpu_ref *ref);
int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid);
int nvgpu_tsg_setup_sw(struct gk20a *g);

View File

@@ -119,7 +119,7 @@ __must_hold(&cde_app->mutex)
* gk20a_channel_close() will also unbind the channel from TSG
*/
gk20a_channel_close(ch);
nvgpu_ref_put(&cde_ctx->tsg->refcount, gk20a_tsg_release);
nvgpu_ref_put(&cde_ctx->tsg->refcount, nvgpu_tsg_release);
/* housekeeping on app */
nvgpu_list_del(&cde_ctx->list);
@@ -1331,7 +1331,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
return -ENOSYS;
}
tsg = gk20a_tsg_open(g, nvgpu_current_pid(g));
tsg = nvgpu_tsg_open(g, nvgpu_current_pid(g));
if (!tsg) {
nvgpu_err(g, "cde: could not create TSG");
err = -ENOMEM;

View File

@@ -416,7 +416,7 @@ int nvgpu_ioctl_tsg_open(struct gk20a *g, struct file *filp)
goto free_mem;
}
tsg = gk20a_tsg_open(g, nvgpu_current_pid(g));
tsg = nvgpu_tsg_open(g, nvgpu_current_pid(g));
gk20a_idle(g);
if (!tsg) {
err = -ENOMEM;
@@ -470,7 +470,7 @@ void nvgpu_ioctl_tsg_release(struct nvgpu_ref *ref)
gk20a_sched_ctrl_tsg_removed(g, tsg);
gk20a_tsg_release(ref);
nvgpu_tsg_release(ref);
gk20a_put(g);
}