Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git, synced 2025-12-22 17:36:20 +03:00
Previously, unit-level interrupt enabling/disabling and the corresponding MC-level interrupt enabling/disabling were not done at the same time. With this change, the stall and nonstall interrupts for units are programmed at the MC level along with the individual unit interrupts. Access to the MC interrupt registers is kept under the mc.intr_lock spinlock. To do this, the CE and GR interrupt mask functions were separated. mc.intr_enable is now used only when global interrupt control needs to be set.

Removed mc_gp10b.c, as mc_gp10b_intr_enable is now removed. Removed the following functions: mc_gv100_intr_enable, mc_gv11b_intr_enable and intr_tu104_enable. Removed intr_pmu_unit_config, since the generic unit interrupt control function can be used instead.

JIRA NVGPU-4336

Change-Id: Ibd296d4a60fda6ba930f18f518ee56ab3f9dacad
Signed-off-by: Sagar Kamble <skamble@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2196178
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
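As a rough illustration of the scheme this commit describes, here is a minimal self-contained sketch (not the nvgpu API; every name in it is illustrative) of a generic unit interrupt control that updates the MC-level stall and nonstall masks in a single critical section, so the two views can never be observed out of sync:

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the MC state; the mutex models mc.intr_lock. */
struct mc_state {
	pthread_mutex_t intr_lock;
	uint32_t stall_mask;     /* MC-level stalling interrupt mask */
	uint32_t nonstall_mask;  /* MC-level nonstalling interrupt mask */
};

/*
 * Generic unit interrupt control: enable or disable a unit's stall and
 * nonstall lines at the MC level together, under one lock.
 */
static void mc_intr_unit_config(struct mc_state *mc, uint32_t unit_bit,
				bool enable)
{
	pthread_mutex_lock(&mc->intr_lock);
	if (enable) {
		mc->stall_mask |= unit_bit;
		mc->nonstall_mask |= unit_bit;
	} else {
		mc->stall_mask &= ~unit_bit;
		mc->nonstall_mask &= ~unit_bit;
	}
	/* A real driver would also write these masks to MC registers here. */
	pthread_mutex_unlock(&mc->intr_lock);
}

int main(void)
{
	struct mc_state mc = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	mc_intr_unit_config(&mc, 1u << 12, true);  /* hypothetical GR bit */
	printf("stall 0x%08x nonstall 0x%08x\n",
	       mc.stall_mask, mc.nonstall_mask);
	mc_intr_unit_config(&mc, 1u << 12, false);
	return 0;
}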
157 lines
3.7 KiB
C
/*
 * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifdef CONFIG_NVGPU_TRACE
#include <trace/events/gk20a.h>
#endif
#include <linux/irqreturn.h>

#include <nvgpu/gk20a.h>
#include <nvgpu/mc.h>
#include <nvgpu/nvgpu_init.h>
#include <nvgpu/gops_mc.h>

#include <nvgpu/atomic.h>
#include "os_linux.h"

/*
 * Hard irq handler for stalling interrupts: check for a pending stall
 * interrupt, mask stalling interrupts at the MC level, and defer the
 * actual servicing to the threaded handler below.
 */
irqreturn_t nvgpu_intr_stall(struct gk20a *g)
{
	u32 mc_intr_0;

#ifdef CONFIG_NVGPU_TRACE
	trace_mc_gk20a_intr_stall(g->name);
#endif

	if (nvgpu_is_powered_off(g))
		return IRQ_NONE;

	/* not from gpu when sharing irq with others */
	mc_intr_0 = g->ops.mc.intr_stall(g);
	if (unlikely(!mc_intr_0))
		return IRQ_NONE;

	nvgpu_mc_intr_stall_pause(g);
#ifndef CONFIG_NVGPU_RECOVERY
	if (g->sw_quiesce_pending) {
		return IRQ_NONE;
	}
#endif

	nvgpu_atomic_inc(&g->mc.hw_irq_stall_count);

#ifdef CONFIG_NVGPU_TRACE
	trace_mc_gk20a_intr_stall_done(g->name);
#endif

	return IRQ_WAKE_THREAD;
}

/*
 * Threaded handler for stalling interrupts: service the interrupt
 * sources, re-enable stalling interrupts, and publish the handled
 * counter for anyone waiting on interrupt completion.
 */
irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
{
	int hw_irq_count;

	nvgpu_log(g, gpu_dbg_intr, "interrupt thread launched");

#ifdef CONFIG_NVGPU_TRACE
	trace_mc_gk20a_intr_thread_stall(g->name);
#endif

	hw_irq_count = nvgpu_atomic_read(&g->mc.hw_irq_stall_count);
	g->ops.mc.isr_stall(g);
	nvgpu_mc_intr_stall_resume(g);
	/* sync handled irq counter before re-enabling interrupts */
	nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, hw_irq_count);

	nvgpu_cond_broadcast(&g->mc.sw_irq_stall_last_handled_cond);

#ifdef CONFIG_NVGPU_TRACE
	trace_mc_gk20a_intr_thread_stall_done(g->name);
#endif

	return IRQ_HANDLED;
}
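
/*
 * Note on the counter handshake above: the hard irq handler increments
 * mc.hw_irq_stall_count, and the thread publishes the snapshot it read
 * into mc.sw_irq_stall_last_handled once servicing is done. A waiter can
 * therefore sample hw_irq_stall_count and block on
 * sw_irq_stall_last_handled_cond until the handled counter catches up,
 * which guarantees that every stalling interrupt raised before the
 * sample point has been fully processed. The nonstall path below uses
 * the same scheme.
 */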

/*
 * Hard irq handler for nonstalling interrupts: mask nonstalling
 * interrupts at the MC level, fold the ops bits reported by the ISR into
 * the pending word, and kick the workqueue to run the deferred
 * operations.
 */
irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
{
	u32 non_stall_intr_val;
	u32 hw_irq_count;
	int ops_old, ops_new, ops = 0;
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);

	if (nvgpu_is_powered_off(g))
		return IRQ_NONE;

	/* not from gpu when sharing irq with others */
	non_stall_intr_val = g->ops.mc.intr_nonstall(g);
	if (unlikely(!non_stall_intr_val))
		return IRQ_NONE;

	nvgpu_mc_intr_nonstall_pause(g);
#ifndef CONFIG_NVGPU_RECOVERY
	if (g->sw_quiesce_pending) {
		return IRQ_NONE;
	}
#endif

	ops = g->ops.mc.isr_nonstall(g);
	if (ops) {
		do {
			ops_old = atomic_read(&l->nonstall_ops);
			ops_new = ops_old | ops;
		} while (ops_old != atomic_cmpxchg(&l->nonstall_ops,
						ops_old, ops_new));

		queue_work(l->nonstall_work_queue, &l->nonstall_fn_work);
	}

	hw_irq_count = nvgpu_atomic_inc_return(&g->mc.hw_irq_nonstall_count);

	/* sync handled irq counter before re-enabling interrupts */
	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, hw_irq_count);

	nvgpu_mc_intr_nonstall_resume(g);

	nvgpu_cond_broadcast(&g->mc.sw_irq_nonstall_last_handled_cond);

	return IRQ_HANDLED;
}

static void mc_gk20a_handle_intr_nonstall(struct gk20a *g, u32 ops)
{
	bool semaphore_wakeup, post_events;

	semaphore_wakeup =
		((ops & NVGPU_NONSTALL_OPS_WAKEUP_SEMAPHORE) != 0U);
	post_events = ((ops & NVGPU_NONSTALL_OPS_POST_EVENTS) != 0U);

	if (semaphore_wakeup) {
		g->ops.semaphore_wakeup(g, post_events);
	}
}

/*
 * Workqueue callback that runs the deferred nonstall operations posted
 * by nvgpu_intr_nonstall().
 */
void nvgpu_intr_nonstall_cb(struct work_struct *work)
{
	struct nvgpu_os_linux *l =
		container_of(work, struct nvgpu_os_linux, nonstall_fn_work);
	struct gk20a *g = &l->g;

	do {
		u32 ops;

		ops = atomic_xchg(&l->nonstall_ops, 0);
		mc_gk20a_handle_intr_nonstall(g, ops);
	} while (atomic_read(&l->nonstall_ops) != 0);
}
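
/*
 * The atomic_xchg() here pairs with the cmpxchg loop in
 * nvgpu_intr_nonstall(): the irq handler ORs freshly reported ops bits
 * into l->nonstall_ops and queues the work, while the worker atomically
 * drains the word and then re-checks it, so bits posted concurrently
 * with a running batch are never lost.
 */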