Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 10:34:43 +03:00)
gpu: nvgpu: keep runlist submit lock only for submit registers
We currently acquire runlist_submit_mutex to submit a runlist and also to
wait for the submit to complete. But locking is only needed to atomically
configure the runlist submit registers, so move the locking inside
gk20a_fifo_runlist_hw_submit(), where we program the registers. Also
convert the mutex to a spinlock at the same time.

Note that similar locking is not required for tu104_fifo_runlist_hw_submit(),
since the runlist submit registers are per-runlist beginning with Turing.

Bug 200452543

Change-Id: I53d6179b80cb066466b64c6efa9393e55e381bfc
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1919058
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: Abdul Salam
Parent: b30438b52a
Commit: 84a37954fb
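To make the change concrete, here is a minimal sketch of the caller-side flow before and after this patch, reconstructed from the hunks below. The wait helper name gk20a_fifo_runlist_wait_pending is inferred from the -ETIMEDOUT/-EINTR handling visible in the hunks; it is not shown verbatim on this page.

/* Before: runlist_submit_mutex was held across both the submit and
 * the potentially long wait for the hardware to finish. */
nvgpu_mutex_acquire(&f->runlist_submit_mutex);
g->ops.fifo.runlist_hw_submit(g, runlist_id, runlist->count, new_buf);
if (wait_for_finish)
        ret = gk20a_fifo_runlist_wait_pending(g, runlist_id); /* inferred name */
nvgpu_mutex_release(&f->runlist_submit_mutex);

/* After: the caller takes no submit lock at all; runlist_hw_submit()
 * itself serializes only the two register writes with a spinlock, so
 * the wait runs without blocking other submitters. */
g->ops.fifo.runlist_hw_submit(g, runlist_id, runlist->count, new_buf);
if (wait_for_finish)
        ret = gk20a_fifo_runlist_wait_pending(g, runlist_id); /* inferred name */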
@@ -600,7 +600,6 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
 	}
 
 	gk20a_fifo_delete_runlist(f);
-	nvgpu_mutex_destroy(&f->runlist_submit_mutex);
 
 	nvgpu_kfree(g, f->pbdma_map);
 	f->pbdma_map = NULL;
@@ -920,11 +919,7 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
 		return err;
 	}
 
-	err = nvgpu_mutex_init(&f->runlist_submit_mutex);
-	if (err) {
-		nvgpu_err(g, "failed to init runlist_submit_mutex");
-		return err;
-	}
+	nvgpu_spinlock_init(&f->runlist_submit_lock);
 
 	g->ops.fifo.init_pbdma_intr_descs(f); /* just filling in data/tables */
 
@@ -3489,6 +3484,8 @@ void gk20a_fifo_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
 	runlist = &g->fifo.runlist_info[runlist_id];
 	runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[buffer_index]);
 
+	nvgpu_spinlock_acquire(&g->fifo.runlist_submit_lock);
+
 	if (count != 0) {
 		gk20a_writel(g, fifo_runlist_base_r(),
 			fifo_runlist_base_ptr_f(u64_lo32(runlist_iova >> 12)) |
@@ -3501,6 +3498,8 @@ void gk20a_fifo_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
 	gk20a_writel(g, fifo_runlist_r(),
 		fifo_runlist_engine_f(runlist_id) |
 		fifo_eng_runlist_length_f(count));
+
+	nvgpu_spinlock_release(&g->fifo.runlist_submit_lock);
 }
 
 int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
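Stitching the previous two hunks together, the post-change gk20a_fifo_runlist_hw_submit() reads roughly as follows. This is a sketch: the tail of the signature, the local declarations, and the runlist info type name are assumed from the usage visible in the hunks, and the aperture-target bits ORed into the base register are elided by the diff context above.

void gk20a_fifo_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
        u32 count, u32 buffer_index)
{
        struct fifo_runlist_info_gk20a *runlist;   /* type name assumed */
        u64 runlist_iova;
        u32 base;

        runlist = &g->fifo.runlist_info[runlist_id];
        runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[buffer_index]);

        nvgpu_spinlock_acquire(&g->fifo.runlist_submit_lock);

        if (count != 0) {
                base = fifo_runlist_base_ptr_f(u64_lo32(runlist_iova >> 12));
                /* the real code ORs an aperture-target field into base
                 * here; the diff context above elides it */
                gk20a_writel(g, fifo_runlist_base_r(), base);
        }

        gk20a_writel(g, fifo_runlist_r(),
                fifo_runlist_engine_f(runlist_id) |
                fifo_eng_runlist_length_f(count));

        nvgpu_spinlock_release(&g->fifo.runlist_submit_lock);
}

The critical section now contains nothing but non-sleeping register writes, which is exactly the case a spinlock is meant for; the completion poll stays outside it, so other submitters are never blocked behind a waiting caller.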
@@ -3590,7 +3589,6 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 		runlist->count = 0;
 	}
 
-	nvgpu_mutex_acquire(&f->runlist_submit_mutex);
 	g->ops.fifo.runlist_hw_submit(g, runlist_id, runlist->count, new_buf);
 
 	if (wait_for_finish) {
@@ -3598,7 +3596,6 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 
 		if (ret == -ETIMEDOUT) {
 			nvgpu_err(g, "runlist %d update timeout", runlist_id);
-			nvgpu_mutex_release(&f->runlist_submit_mutex);
 			/* trigger runlist update timeout recovery */
 			return ret;
 
@@ -3606,7 +3603,6 @@ int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 			nvgpu_err(g, "runlist update interrupted");
 		}
 	}
-	nvgpu_mutex_release(&f->runlist_submit_mutex);
 
 	runlist->cur_buffer = new_buf;
 
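Condensing the last three hunks, the post-change tail of gk20a_fifo_update_runlist_locked() looks roughly like the sketch below. One design note: because no submit lock is held across the wait anymore, the early return on timeout no longer needs a matching unlock, removing a class of missed-unlock bugs on error paths. As above, the wait helper name is inferred, not shown verbatim in the hunks.

/* post-change tail of gk20a_fifo_update_runlist_locked(), condensed */
g->ops.fifo.runlist_hw_submit(g, runlist_id, runlist->count, new_buf);

if (wait_for_finish) {
        ret = gk20a_fifo_runlist_wait_pending(g, runlist_id); /* inferred name */

        if (ret == -ETIMEDOUT) {
                nvgpu_err(g, "runlist %d update timeout", runlist_id);
                /* trigger runlist update timeout recovery */
                return ret;

        } else if (ret == -EINTR) {
                nvgpu_err(g, "runlist update interrupted");
        }
}

runlist->cur_buffer = new_buf;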
@@ -185,7 +185,7 @@ struct fifo_gk20a {
 	struct nvgpu_list_node free_chs;
 	struct nvgpu_mutex free_chs_mutex;
 	struct nvgpu_mutex gr_reset_mutex;
-	struct nvgpu_mutex runlist_submit_mutex;
+	struct nvgpu_spinlock runlist_submit_lock;
 
 	struct tsg_gk20a *tsg;
 	struct nvgpu_mutex tsg_inuse_mutex;
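A side benefit visible in the first two hunks: the conversion also simplifies the lock's lifecycle. Assuming nvgpu's locking API behaves as the hunks imply, nvgpu_spinlock_init() returns void, so the error path around nvgpu_mutex_init() disappears, and nvgpu spinlocks have no destroy call, which is why gk20a_remove_fifo_support() drops nvgpu_mutex_destroy() without gaining a counterpart.

/* old scheme (removed): fallible init plus explicit teardown */
err = nvgpu_mutex_init(&f->runlist_submit_mutex);
if (err) {
        nvgpu_err(g, "failed to init runlist_submit_mutex");
        return err;
}
/* ... and later, in gk20a_remove_fifo_support(): */
nvgpu_mutex_destroy(&f->runlist_submit_mutex);

/* new scheme: infallible init, nothing to tear down */
nvgpu_spinlock_init(&f->runlist_submit_lock);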