gpu: nvgpu: Move deferred interrupt wait code

Move the code that waits for deferred interrupts to nvgpu_common.c and
make it global. Also rename that function to use nvgpu_ as the function
prefix.

Bug 1816516
Bug 1807277

Change-Id: I42c4982ea853af5489051534219bfe8b253c2784
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1250027
(cherry picked from commit cb6fb03e20b08e5c3606ae8a5a9c237bfdf9e7da)
Reviewed-on: http://git-master/r/1274475
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
This commit is contained in:
Alex Waterman
2016-11-08 11:28:35 -08:00
committed by mobile promotions
parent 91d977ced4
commit 9e2f7d98d4
3 changed files with 43 additions and 27 deletions

View File

@@ -871,31 +871,6 @@ static void gk20a_free_error_notifiers(struct channel_gk20a *ch)
mutex_unlock(&ch->error_notifier_mutex);
}
/* Returns delta of cyclic integers a and b. If a is ahead of b, delta
 * is positive */
/* NOTE(review): the signed subtraction technically overflows (UB) when the
 * two counters straddle INT_MAX/INT_MIN; in practice kernel builds use
 * two's-complement wraparound, which is the intended cyclic behavior. */
static int cyclic_delta(int a, int b)
{
return a - b;
}
/* Block until every interrupt that hardware had raised at the time of this
 * call has been fully processed by the software (deferred/bottom-half)
 * handlers. Used before channel teardown so no handler can touch freed
 * state. */
static void gk20a_wait_for_deferred_interrupts(struct gk20a *g)
{
/* Snapshot the hardware-side IRQ counts first; only interrupts raised
 * before this point are waited for, so later IRQ activity cannot make
 * this wait unbounded. */
int stall_irq_threshold = atomic_read(&g->hw_irq_stall_count);
int nonstall_irq_threshold = atomic_read(&g->hw_irq_nonstall_count);
/* wait until all stalling irqs are handled */
wait_event(g->sw_irq_stall_last_handled_wq,
cyclic_delta(stall_irq_threshold,
atomic_read(&g->sw_irq_stall_last_handled))
<= 0);
/* wait until all non-stalling irqs are handled */
wait_event(g->sw_irq_nonstall_last_handled_wq,
cyclic_delta(nonstall_irq_threshold,
atomic_read(&g->sw_irq_nonstall_last_handled))
<= 0);
}
static void gk20a_wait_until_counter_is_N(
struct channel_gk20a *ch, atomic_t *counter, int wait_value,
wait_queue_head_t *wq, const char *caller, const char *counter_name)
@@ -944,7 +919,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
/* wait until all pending interrupts for recently completed
* jobs are handled */
gk20a_wait_for_deferred_interrupts(g);
nvgpu_wait_for_deferred_interrupts(g);
/* prevent new refs */
spin_lock(&ch->ref_obtain_lock);
@@ -1043,7 +1018,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
/* make sure we don't have deferred interrupts pending that
* could still touch the channel */
gk20a_wait_for_deferred_interrupts(g);
nvgpu_wait_for_deferred_interrupts(g);
unbind:
if (gk20a_is_channel_marked_as_tsg(ch))

View File

@@ -1384,6 +1384,8 @@ static inline void gk20a_channel_trace_sched_param(
ch->ch_ctx.gr_ctx->compute_preempt_mode : 0));
}
void nvgpu_wait_for_deferred_interrupts(struct gk20a *g);
#ifdef CONFIG_DEBUG_FS
int gk20a_railgating_debugfs_init(struct device *dev);
#endif

View File

@@ -231,3 +231,42 @@ const struct firmware *nvgpu_request_firmware(struct gk20a *g,
return fw;
}
/**
 * cyclic_delta - Returns delta of cyclic integers a and b.
 *
 * @a - First integer
 * @b - Second integer
 *
 * Note: if a is ahead of b, delta is positive.
 *
 * The subtraction is performed in unsigned arithmetic, where wraparound is
 * well defined, and the result is converted back to int. Plain signed
 * subtraction is undefined behavior when the cyclic counters straddle
 * INT_MAX/INT_MIN (same idiom as the kernel's time_after() helpers).
 */
static int cyclic_delta(int a, int b)
{
return (int)((unsigned int)a - (unsigned int)b);
}
/**
 * nvgpu_wait_for_deferred_interrupts - Wait for interrupts to complete
 *
 * @g - The GPU to wait on.
 *
 * Waits until all interrupt handlers that have been scheduled to run have
 * completed.
 */
void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
{
/* Snapshot the hardware IRQ counts up front: we only wait for
 * interrupts raised before this point, so later IRQ traffic cannot
 * extend the wait indefinitely. */
int stall_seen = atomic_read(&g->hw_irq_stall_count);
int nonstall_seen = atomic_read(&g->hw_irq_nonstall_count);
/* Sleep until the stalling-IRQ software handler has caught up with
 * the snapshot taken above. */
wait_event(g->sw_irq_stall_last_handled_wq,
cyclic_delta(stall_seen,
atomic_read(&g->sw_irq_stall_last_handled)) <= 0);
/* Likewise for the non-stalling IRQ path. */
wait_event(g->sw_irq_nonstall_last_handled_wq,
cyclic_delta(nonstall_seen,
atomic_read(&g->sw_irq_nonstall_last_handled)) <= 0);
}