mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: set aggressive_sync_destroy at runtime
We currently set the "aggressive_destroy" flag to destroy sync objects
statically and for each sync object.

Move this flag to the per-platform structure so that it can be set
per-platform for all the sync objects.

Also, set the default value of this flag to "false", and set it to
"true" once we have more than 64 channels in use.

Bug 200141116

Change-Id: I1bc271df4f468a4087a06a27c7289ee0ec3ef29c
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/822041
(cherry picked from commit 98741e7e88066648f4f14490c76b61dbff745103)
Reviewed-on: http://git-master/r/835800
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
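In outline, the patch replaces the old per-sync-object flag with a counter-and-threshold toggle. The standalone sketch below condenses that behaviour into plain C so it can be compiled and run on its own; the driver's free_chs_mutex locking and the fifo_gk20a/gk20a_platform structures are deliberately reduced to file-scope variables, so this is an illustration of the threshold logic, not driver code.

    /* Standalone sketch of the runtime toggle introduced by this patch.
     * The names mirror the diff below; locking and the real driver
     * structures are intentionally omitted. */
    #include <stdbool.h>
    #include <stdio.h>

    #define NVGPU_BEGIN_AGGRESSIVE_SYNC_DESTROY_LIMIT 64 /* channels */

    static int used_channels;
    static bool aggressive_sync_destroy; /* default: false, as in the patch */

    static void allocate_channel(void)
    {
            used_channels++;
            if (used_channels > NVGPU_BEGIN_AGGRESSIVE_SYNC_DESTROY_LIMIT)
                    aggressive_sync_destroy = true;
    }

    static void free_channel(void)
    {
            used_channels--;
            if (used_channels < NVGPU_BEGIN_AGGRESSIVE_SYNC_DESTROY_LIMIT)
                    aggressive_sync_destroy = false;
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 65; i++)
                    allocate_channel();
            /* 65 > 64, so the flag has been switched on */
            printf("used=%d aggressive=%d\n", used_channels, aggressive_sync_destroy);

            free_channel();
            free_channel();
            /* 63 < 64, so the flag is off again */
            printf("used=%d aggressive=%d\n", used_channels, aggressive_sync_destroy);
            return 0;
    }

Note the strict comparisons: above 64 channels the flag is forced true, below 64 it is forced false, and at exactly 64 it simply keeps its previous value.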
committed by Terje Bergstrom
parent b8b6df791b
commit 7f79d647d6
@@ -42,6 +42,8 @@
 #define NVMAP_HANDLE_PARAM_SIZE 1
 
+#define NVGPU_BEGIN_AGGRESSIVE_SYNC_DESTROY_LIMIT 64 /* channels */
+
 static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f);
 static void free_channel(struct fifo_gk20a *f, struct channel_gk20a *c);
@@ -64,6 +66,7 @@ static void gk20a_free_error_notifiers(struct channel_gk20a *ch);
 static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
 {
         struct channel_gk20a *ch = NULL;
+        struct gk20a_platform *platform = gk20a_get_platform(f->g->dev);
 
         mutex_lock(&f->free_chs_mutex);
         if (!list_empty(&f->free_chs)) {
@@ -72,21 +75,31 @@ static struct channel_gk20a *allocate_channel(struct fifo_gk20a *f)
                 list_del(&ch->free_chs);
                 WARN_ON(atomic_read(&ch->ref_count));
                 WARN_ON(ch->referenceable);
+                f->used_channels++;
         }
         mutex_unlock(&f->free_chs_mutex);
 
+        if (f->used_channels > NVGPU_BEGIN_AGGRESSIVE_SYNC_DESTROY_LIMIT)
+                platform->aggressive_sync_destroy = true;
+
         return ch;
 }
 
 static void free_channel(struct fifo_gk20a *f,
                 struct channel_gk20a *ch)
 {
+        struct gk20a_platform *platform = gk20a_get_platform(f->g->dev);
+
         trace_gk20a_release_used_channel(ch->hw_chid);
         /* refcount is zero here and channel is in a freed/dead state */
         mutex_lock(&f->free_chs_mutex);
         /* add to head to increase visibility of timing-related bugs */
         list_add(&ch->free_chs, &f->free_chs);
+        f->used_channels--;
         mutex_unlock(&f->free_chs_mutex);
 
+        if (f->used_channels < NVGPU_BEGIN_AGGRESSIVE_SYNC_DESTROY_LIMIT)
+                platform->aggressive_sync_destroy = false;
 }
 
 int channel_gk20a_commit_va(struct channel_gk20a *c)
@@ -311,6 +324,7 @@ static void channel_gk20a_bind(struct channel_gk20a *ch_gk20a)
 void channel_gk20a_unbind(struct channel_gk20a *ch_gk20a)
 {
         struct gk20a *g = ch_gk20a->g;
+        struct gk20a_platform *platform = gk20a_get_platform(g->dev);
 
         gk20a_dbg_fn("");
 
@@ -327,8 +341,7 @@ void channel_gk20a_unbind(struct channel_gk20a *ch_gk20a)
          * if not, then it will be destroyed at channel_free()
          */
         mutex_lock(&ch_gk20a->sync_lock);
-        if (ch_gk20a->sync && ch_gk20a->sync->aggressive_destroy) {
-
+        if (ch_gk20a->sync && platform->aggressive_sync_destroy) {
                 ch_gk20a->sync->destroy(ch_gk20a->sync);
                 ch_gk20a->sync = NULL;
         }
@@ -1715,6 +1728,7 @@ void gk20a_channel_update(struct channel_gk20a *c, int nr_completed)
 {
         struct vm_gk20a *vm = c->vm;
         struct channel_gk20a_job *job, *n;
+        struct gk20a_platform *platform = gk20a_get_platform(c->g->dev);
 
         trace_gk20a_channel_update(c->hw_chid);
 
@@ -1769,7 +1783,7 @@ void gk20a_channel_update(struct channel_gk20a *c, int nr_completed)
          */
         if (list_empty(&c->jobs)) {
                 mutex_lock(&c->sync_lock);
-                if (c->sync && c->sync->aggressive_destroy &&
+                if (c->sync && platform->aggressive_sync_destroy &&
                     gk20a_fence_is_expired(c->last_submit.post_fence)) {
                         c->sync->destroy(c->sync);
                         c->sync = NULL;
@@ -345,8 +345,6 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
         sp->ops.syncpt_id = gk20a_channel_syncpt_id;
         sp->ops.destroy = gk20a_channel_syncpt_destroy;
 
-        sp->ops.aggressive_destroy = true;
-
         return &sp->ops;
 }
 #endif /* CONFIG_TEGRA_GK20A */
@@ -690,10 +688,6 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c)
         sema->ops.syncpt_id = gk20a_channel_semaphore_syncpt_id;
         sema->ops.destroy = gk20a_channel_semaphore_destroy;
 
-        /* Aggressively destroying the semaphore sync would cause overhead
-         * since the pool needs to be mapped to GMMU. */
-        sema->ops.aggressive_destroy = false;
-
         return &sema->ops;
 clean_up:
         gk20a_channel_semaphore_destroy(&sema->ops);
@@ -105,6 +105,7 @@ struct fifo_gk20a {
         struct mem_desc userd;
         u32 userd_entry_size;
 
+        int used_channels;
         struct channel_gk20a *channel;
         /* zero-kref'd channels here */
         struct list_head free_chs;
@@ -47,6 +47,9 @@ struct gk20a_platform {
         /* Should be populated at probe. */
         bool has_syncpoints;
 
+        /* flag to set sync destroy aggressiveness */
+        bool aggressive_sync_destroy;
+
         /* Should be populated by probe. */
         struct dentry *debugfs;
         struct dentry *debugfs_alias;
@@ -63,7 +63,7 @@ static void vgpu_channel_unbind(struct channel_gk20a *ch)
          * resource at this point
          * if not, then it will be destroyed at channel_free()
          */
-        if (ch->sync && ch->sync->aggressive_destroy) {
+        if (ch->sync && platform->aggressive_sync_destroy) {
                 ch->sync->destroy(ch->sync);
                 ch->sync = NULL;
         }
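Because the flag now sits in struct gk20a_platform, each platform gets one shared setting, and a statically defined platform struct leaves the new field zero-initialized, i.e. the "false" default the commit message calls for. A board file could also spell the default out explicitly; the initializer below is hypothetical (gk20a_example_platform is not part of this change) and only illustrates where the field lives:

    /* Hypothetical platform definition: .aggressive_sync_destroy starts
     * false and is flipped at runtime by allocate_channel()/free_channel(). */
    struct gk20a_platform gk20a_example_platform = {
            .has_syncpoints = true,
            .aggressive_sync_destroy = false,
    };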