gpu: nvgpu: move deferred interrupt wait to common code

- Deferred interrupt wait uses the nvgpu abstractions,
  so it can be made common for QNX/Linux.

Jira NVGPU-1396

Change-Id: Iaabc5f004d702ba1dc3fba62778ae1b7044f0392
Signed-off-by: Shashank Singh <shashsingh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1975137
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Shashank Singh
Date: 2018-12-18 14:34:01 +05:30
Committed-by: mobile promotions
Parent: dce78f7332
Commit: d9438128a8
5 changed files with 62 additions and 64 deletions
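The pattern being moved is a snapshot-and-wait handshake: the hard-irq path bumps a hw_irq counter, the deferred handler records how far it has gotten in a sw_irq counter and broadcasts on a condition variable, and a waiter snapshots the hw counter and sleeps until the sw counter catches up. Below is a minimal user-space sketch of that handshake, with pthread/stdatomic standing in for the nvgpu_cond/nvgpu_atomic abstractions; every name in it is illustrative, not taken from the diff.

/*
 * Minimal user-space sketch of the deferred-interrupt handshake.
 * pthread/stdatomic stand in for nvgpu_cond/nvgpu_atomic; all names
 * are illustrative. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int hw_irq_count;         /* bumped by the "ISR" */
static atomic_int sw_irq_last_handled;  /* advanced by the handler */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t handled = PTHREAD_COND_INITIALIZER;

/* Signed subtraction measures the distance between wrapping counters. */
static int cyclic_delta(int a, int b)
{
	return a - b;
}

/* Deferred handler: service the irq, publish progress, wake waiters. */
static void *handler(void *arg)
{
	int seen = atomic_load(&hw_irq_count);

	(void)arg;
	/* ... interrupt servicing would happen here ... */
	pthread_mutex_lock(&lock);
	atomic_store(&sw_irq_last_handled, seen);
	pthread_cond_broadcast(&handled);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Waiter: snapshot the hw counter, sleep until sw catches up to it. */
static void wait_for_deferred(void)
{
	int threshold = atomic_load(&hw_irq_count);

	pthread_mutex_lock(&lock);
	while (cyclic_delta(threshold,
			    atomic_load(&sw_irq_last_handled)) > 0)
		pthread_cond_wait(&handled, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	atomic_fetch_add(&hw_irq_count, 1);      /* pretend an irq fired */
	pthread_create(&t, NULL, handler, NULL);
	wait_for_deferred();                     /* returns once handled */
	pthread_join(&t, NULL);
	printf("all deferred interrupts handled\n");
	return 0;
}

The driver version in the first hunk below is the same shape, just duplicated for the stall and nonstall counter pairs and expressed through the nvgpu wrappers.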

View File

@@ -1,7 +1,7 @@
/*
* GK20A Master Control
*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -51,3 +51,42 @@ u32 nvgpu_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev)
return val;
}
+
+/**
+ * cyclic_delta - Returns delta of cyclic integers a and b.
+ *
+ * @a - First integer
+ * @b - Second integer
+ *
+ * Note: if a is ahead of b, delta is positive.
+ */
+static int cyclic_delta(int a, int b)
+{
+	return a - b;
+}
+
+/**
+ * nvgpu_wait_for_deferred_interrupts - Wait for interrupts to complete
+ *
+ * @g - The GPU to wait on.
+ *
+ * Waits until all interrupt handlers that have been scheduled to run have
+ * completed.
+ */
+void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
+{
+	int stall_irq_threshold = nvgpu_atomic_read(&g->hw_irq_stall_count);
+	int nonstall_irq_threshold = nvgpu_atomic_read(&g->hw_irq_nonstall_count);
+
+	/* wait until all stalling irqs are handled */
+	NVGPU_COND_WAIT(&g->sw_irq_stall_last_handled_cond,
+		cyclic_delta(stall_irq_threshold,
+			nvgpu_atomic_read(&g->sw_irq_stall_last_handled))
+		<= 0, 0);
+
+	/* wait until all non-stalling irqs are handled */
+	NVGPU_COND_WAIT(&g->sw_irq_nonstall_last_handled_cond,
+		cyclic_delta(nonstall_irq_threshold,
+			nvgpu_atomic_read(&g->sw_irq_nonstall_last_handled))
+		<= 0, 0);
+}
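
The "cyclic integers" wording in the doc comment matters: both counters are free-running and eventually wrap, so the waiter compares them via a signed subtraction instead of an ordinary <=. A small self-contained demonstration of the wrap-around case (values are illustrative; the trick relies on two's-complement wrap-around of signed int, which kernel builds make well-defined via -fno-strict-overflow):

#include <limits.h>
#include <stdio.h>

static int cyclic_delta(int a, int b)
{
	return a - b;
}

int main(void)
{
	/* hw counter has wrapped past INT_MAX; sw counter has not yet */
	int hw = INT_MIN + 2;   /* four increments ahead of sw, after wrap */
	int sw = INT_MAX - 1;

	/* a plain comparison wrongly claims all irqs are handled ... */
	printf("hw <= sw     : %d (wrong)\n", hw <= sw);
	/* ... while the wrapped delta still reports 4 outstanding */
	printf("cyclic_delta : %d\n", cyclic_delta(hw, sw));
	return 0;
}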

View File

@@ -1745,6 +1745,16 @@ struct gk20a {
bool pmu_lsf_pmu_wpr_init_done;
u32 pmu_lsf_loaded_falcon_id;
+
+	/* Needed to keep track of deferred interrupts */
+	nvgpu_atomic_t hw_irq_stall_count;
+	nvgpu_atomic_t hw_irq_nonstall_count;
+
+	struct nvgpu_cond sw_irq_stall_last_handled_cond;
+	nvgpu_atomic_t sw_irq_stall_last_handled;
+
+	struct nvgpu_cond sw_irq_nonstall_last_handled_cond;
+	nvgpu_atomic_t sw_irq_nonstall_last_handled;
int irqs_enabled;
int irq_stall; /* can be same as irq_nonstall in case of PCI */
int irq_nonstall;
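
Note that the two struct nvgpu_cond members added above need explicit initialization by every OS layer before interrupts are enabled; the Linux nvgpu_init_vars hunk below does exactly that. A hedged sketch of what an equivalent port would have to provide, using the existing nvgpu_cond_init/nvgpu_cond_destroy primitives (the helper name and its error handling are hypothetical, not part of this diff):

/*
 * Hypothetical per-OS setup sketch: both condition variables must be
 * initialized before the first waiter can use them. The nvgpu_atomic_t
 * counters sit in zero-initialized storage and need no explicit setup.
 */
static int example_init_irq_tracking(struct gk20a *g)
{
	int err;

	err = nvgpu_cond_init(&g->sw_irq_stall_last_handled_cond);
	if (err != 0)
		return err;

	err = nvgpu_cond_init(&g->sw_irq_nonstall_last_handled_cond);
	if (err != 0)
		nvgpu_cond_destroy(&g->sw_irq_stall_last_handled_cond);

	return err;
}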

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -50,8 +50,8 @@ static void nvgpu_init_vars(struct gk20a *g)
struct device *dev = dev_from_gk20a(g);
struct gk20a_platform *platform = dev_get_drvdata(dev);
-	nvgpu_cond_init(&l->sw_irq_stall_last_handled_wq);
-	nvgpu_cond_init(&l->sw_irq_nonstall_last_handled_wq);
+	nvgpu_cond_init(&g->sw_irq_stall_last_handled_cond);
+	nvgpu_cond_init(&g->sw_irq_nonstall_last_handled_cond);
init_rwsem(&l->busy_lock);
nvgpu_rwsem_init(&g->deterministic_busy);
@@ -297,46 +297,6 @@ int nvgpu_probe(struct gk20a *g,
return 0;
}
-
-/**
- * cyclic_delta - Returns delta of cyclic integers a and b.
- *
- * @a - First integer
- * @b - Second integer
- *
- * Note: if a is ahead of b, delta is positive.
- */
-static int cyclic_delta(int a, int b)
-{
-	return a - b;
-}
-
-/**
- * nvgpu_wait_for_deferred_interrupts - Wait for interrupts to complete
- *
- * @g - The GPU to wait on.
- *
- * Waits until all interrupt handlers that have been scheduled to run have
- * completed.
- */
-void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
-{
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-	int stall_irq_threshold = atomic_read(&l->hw_irq_stall_count);
-	int nonstall_irq_threshold = atomic_read(&l->hw_irq_nonstall_count);
-
-	/* wait until all stalling irqs are handled */
-	NVGPU_COND_WAIT(&l->sw_irq_stall_last_handled_wq,
-		cyclic_delta(stall_irq_threshold,
-			atomic_read(&l->sw_irq_stall_last_handled))
-		<= 0, 0);
-
-	/* wait until all non-stalling irqs are handled */
-	NVGPU_COND_WAIT(&l->sw_irq_nonstall_last_handled_wq,
-		cyclic_delta(nonstall_irq_threshold,
-			atomic_read(&l->sw_irq_nonstall_last_handled))
-		<= 0, 0);
-}
static void nvgpu_free_gk20a(struct gk20a *g)
{
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -22,7 +22,6 @@
irqreturn_t nvgpu_intr_stall(struct gk20a *g)
{
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
u32 mc_intr_0;
trace_mc_gk20a_intr_stall(g->name);
@@ -37,7 +36,7 @@ irqreturn_t nvgpu_intr_stall(struct gk20a *g)
g->ops.mc.intr_stall_pause(g);
-	atomic_inc(&l->hw_irq_stall_count);
+	nvgpu_atomic_inc(&g->hw_irq_stall_count);
trace_mc_gk20a_intr_stall_done(g->name);
@@ -46,20 +45,19 @@ irqreturn_t nvgpu_intr_stall(struct gk20a *g)
irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
{
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
int hw_irq_count;
nvgpu_log(g, gpu_dbg_intr, "interrupt thread launched");
trace_mc_gk20a_intr_thread_stall(g->name);
-	hw_irq_count = atomic_read(&l->hw_irq_stall_count);
+	hw_irq_count = nvgpu_atomic_read(&g->hw_irq_stall_count);
g->ops.mc.isr_stall(g);
g->ops.mc.intr_stall_resume(g);
/* sync handled irq counter before re-enabling interrupts */
-	atomic_set(&l->sw_irq_stall_last_handled, hw_irq_count);
+	nvgpu_atomic_set(&g->sw_irq_stall_last_handled, hw_irq_count);
-	nvgpu_cond_broadcast(&l->sw_irq_stall_last_handled_wq);
+	nvgpu_cond_broadcast(&g->sw_irq_stall_last_handled_cond);
trace_mc_gk20a_intr_thread_stall_done(g->name);
@@ -94,14 +92,14 @@ irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
queue_work(l->nonstall_work_queue, &l->nonstall_fn_work);
}
-	hw_irq_count = atomic_inc_return(&l->hw_irq_nonstall_count);
+	hw_irq_count = nvgpu_atomic_inc_return(&g->hw_irq_nonstall_count);
/* sync handled irq counter before re-enabling interrupts */
-	atomic_set(&l->sw_irq_nonstall_last_handled, hw_irq_count);
+	nvgpu_atomic_set(&g->sw_irq_nonstall_last_handled, hw_irq_count);
g->ops.mc.intr_nonstall_resume(g);
-	nvgpu_cond_broadcast(&l->sw_irq_nonstall_last_handled_wq);
+	nvgpu_cond_broadcast(&g->sw_irq_nonstall_last_handled_cond);
return IRQ_HANDLED;
}
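
The repeated "sync handled irq counter before re-enabling interrupts" comment captures the ordering half of the contract: the handled count must be published before intr_nonstall_resume lets the next interrupt fire, otherwise a waiter could snapshot a hw count whose handling is never recorded. Here is the nonstall accounting in miniature, again as a user-space sketch with stdatomic in place of nvgpu_atomic_* (names illustrative):

/*
 * Nonstall accounting in miniature; stdatomic stands in for
 * nvgpu_atomic_*, and the names are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int hw_irq_nonstall_count;
static atomic_int sw_irq_nonstall_last_handled;

static void nonstall_isr(void)
{
	/* inc_return in one step: count this irq, grab the new total */
	int hw = atomic_fetch_add(&hw_irq_nonstall_count, 1) + 1;

	/* publish the handled count *before* interrupts are re-enabled,
	 * so no waiter can snapshot a hw count that is never recorded */
	atomic_store(&sw_irq_nonstall_last_handled, hw);
	/* ... g->ops.mc.intr_nonstall_resume(g) would go here ... */
}

int main(void)
{
	nonstall_isr();
	printf("hw=%d sw=%d\n",
	       atomic_load(&hw_irq_nonstall_count),
	       atomic_load(&sw_irq_nonstall_last_handled));
	return 0;
}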

View File

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -108,17 +108,8 @@ struct nvgpu_os_linux {
struct device_dma_parameters dma_parms;
-	atomic_t hw_irq_stall_count;
-	atomic_t hw_irq_nonstall_count;
-	struct nvgpu_cond sw_irq_stall_last_handled_wq;
-	atomic_t sw_irq_stall_last_handled;
	atomic_t nonstall_ops;
-	struct nvgpu_cond sw_irq_nonstall_last_handled_wq;
-	atomic_t sw_irq_nonstall_last_handled;
struct work_struct nonstall_fn_work;
struct workqueue_struct *nonstall_work_queue;