mirror of
git://nv-tegra.nvidia.com/linux-nvgpu.git
synced 2025-12-22 17:36:20 +03:00
Split handling of the non-stalling interrupt into a Linux-specific, chip-agnostic part and an OS-independent, chip-specific part. The Linux-specific, chip-independent part contains the ISR handler and passes control to a bottom-half worker. It uses the new MC HALs intr_nonstall (query interrupt status), intr_nonstall_pause (pause interrupts), intr_nonstall_resume (resume interrupts), and is_intr1_pending (query a per-engine interrupt bit). The MC HAL isr_nonstall is removed, because its work is now handled in chip-independent code.

JIRA NVGPU-26

Change-Id: I3e4c9905ef6eef7f1cc9f71b0278518ae663f87e
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1497048
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
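For illustration, a minimal sketch of how the Linux-specific, chip-independent layer described above might drive the new HALs from a threaded interrupt handler. Only the g->ops.mc hooks (intr_nonstall, intr_nonstall_pause, intr_nonstall_resume, is_intr1_pending) are the ones named by this change; the handler names nvgpu_intr_nonstall_isr/nvgpu_intr_nonstall_thread and the handle_fifo_nonstall() stub are hypothetical, not the actual implementation:

#include <linux/interrupt.h>

#include "gk20a.h"

#include <nvgpu/unit.h>

/* Hypothetical per-unit handler stub; real code would call into FIFO. */
static void handle_fifo_nonstall(struct gk20a *g)
{
        (void)g;
}

/* Top half: query and mask the non-stalling line, then defer. */
static irqreturn_t nvgpu_intr_nonstall_isr(int irq, void *dev_id)
{
        struct gk20a *g = dev_id;

        /* Query pending non-stalling interrupts via the new HAL. */
        u32 mc_intr_1 = g->ops.mc.intr_nonstall(g);

        if (!mc_intr_1)
                return IRQ_NONE;

        /* Pause non-stalling interrupts until the bottom half has run. */
        g->ops.mc.intr_nonstall_pause(g);

        return IRQ_WAKE_THREAD;
}

/* Bottom half: chip-specific decode through HALs, then resume. */
static irqreturn_t nvgpu_intr_nonstall_thread(int irq, void *dev_id)
{
        struct gk20a *g = dev_id;
        u32 mc_intr_1 = g->ops.mc.intr_nonstall(g);

        /* Per-unit decode via HAL; FIFO shown as the one example this
         * file's is_intr1_pending() actually supports. */
        if (g->ops.mc.is_intr1_pending(g, NVGPU_UNIT_FIFO, mc_intr_1))
                handle_fifo_nonstall(g);

        /* Re-enable the non-stalling line. */
        g->ops.mc.intr_nonstall_resume(g);

        return IRQ_HANDLED;
}

Such a pair would be registered once with request_threaded_irq(irq, nvgpu_intr_nonstall_isr, nvgpu_intr_nonstall_thread, 0, "nvgpu_nonstall", g); the chip-specific file below then only has to supply the HALs.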
256 lines
6.3 KiB
C
/*
 * GK20A Master Control
 *
 * Copyright (c) 2014-2017, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <trace/events/gk20a.h>

#include "gk20a.h"
#include "mc_gk20a.h"

#include <nvgpu/timers.h>
#include <nvgpu/atomic.h>
#include <nvgpu/unit.h>

#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>

void mc_gk20a_isr_stall(struct gk20a *g)
{
        u32 mc_intr_0;
        int hw_irq_count;
        u32 engine_id_idx;
        u32 active_engine_id = 0;
        u32 engine_enum = ENGINE_INVAL_GK20A;

        mc_intr_0 = g->ops.mc.intr_stall(g);
        hw_irq_count = atomic_read(&g->hw_irq_stall_count);

        gk20a_dbg(gpu_dbg_intr, "stall intr %08x\n", mc_intr_0);

        for (engine_id_idx = 0; engine_id_idx < g->fifo.num_engines;
                        engine_id_idx++) {
                active_engine_id = g->fifo.active_engines_list[engine_id_idx];

                if (mc_intr_0 &
                    g->fifo.engine_info[active_engine_id].intr_mask) {
                        engine_enum =
                                g->fifo.engine_info[active_engine_id].engine_enum;
                        /* GR Engine */
                        if (engine_enum == ENGINE_GR_GK20A) {
                                gr_gk20a_elpg_protected_call(g,
                                                gk20a_gr_isr(g));
                        }

                        /* CE Engine */
                        if (((engine_enum == ENGINE_GRCE_GK20A) ||
                             (engine_enum == ENGINE_ASYNC_CE_GK20A)) &&
                            g->ops.ce2.isr_stall) {
                                g->ops.ce2.isr_stall(g,
                                        g->fifo.engine_info[active_engine_id].inst_id,
                                        g->fifo.engine_info[active_engine_id].pri_base);
                        }
                }
        }
        if (mc_intr_0 & mc_intr_0_pfifo_pending_f())
                gk20a_fifo_isr(g);
        if (mc_intr_0 & mc_intr_0_pmu_pending_f())
                gk20a_pmu_isr(g);
        if (mc_intr_0 & mc_intr_0_priv_ring_pending_f())
                gk20a_priv_ring_isr(g);
        if (mc_intr_0 & mc_intr_0_ltc_pending_f())
                g->ops.ltc.isr(g);
        if (mc_intr_0 & mc_intr_0_pbus_pending_f())
                g->ops.bus.isr(g);

        /* sync handled irq counter before re-enabling interrupts */
        atomic_set(&g->sw_irq_stall_last_handled, hw_irq_count);
}

void mc_gk20a_intr_enable(struct gk20a *g)
{
        u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g);

        gk20a_writel(g, mc_intr_mask_1_r(),
                     mc_intr_0_pfifo_pending_f()
                     | eng_intr_mask);
        gk20a_writel(g, mc_intr_en_1_r(),
                     mc_intr_en_1_inta_hardware_f());

        gk20a_writel(g, mc_intr_mask_0_r(),
                     mc_intr_0_pfifo_pending_f()
                     | mc_intr_0_priv_ring_pending_f()
                     | mc_intr_0_ltc_pending_f()
                     | mc_intr_0_pbus_pending_f()
                     | eng_intr_mask);
        gk20a_writel(g, mc_intr_en_0_r(),
                     mc_intr_en_0_inta_hardware_f());
}

void mc_gk20a_intr_unit_config(struct gk20a *g, bool enable,
                               bool is_stalling, u32 mask)
{
        u32 mask_reg = (is_stalling ? mc_intr_mask_0_r() :
                                      mc_intr_mask_1_r());

        if (enable)
                gk20a_writel(g, mask_reg, gk20a_readl(g, mask_reg) | mask);
        else
                gk20a_writel(g, mask_reg, gk20a_readl(g, mask_reg) & ~mask);
}

void mc_gk20a_intr_stall_pause(struct gk20a *g)
{
        gk20a_writel(g, mc_intr_en_0_r(),
                     mc_intr_en_0_inta_disabled_f());

        /* flush previous write */
        gk20a_readl(g, mc_intr_en_0_r());
}

void mc_gk20a_intr_stall_resume(struct gk20a *g)
{
        gk20a_writel(g, mc_intr_en_0_r(),
                     mc_intr_en_0_inta_hardware_f());

        /* flush previous write */
        gk20a_readl(g, mc_intr_en_0_r());
}

void mc_gk20a_intr_nonstall_pause(struct gk20a *g)
{
        gk20a_writel(g, mc_intr_en_1_r(),
                     mc_intr_en_0_inta_disabled_f());

        /* flush previous write */
        gk20a_readl(g, mc_intr_en_1_r());
}

void mc_gk20a_intr_nonstall_resume(struct gk20a *g)
{
        gk20a_writel(g, mc_intr_en_1_r(),
                     mc_intr_en_0_inta_hardware_f());

        /* flush previous write */
        gk20a_readl(g, mc_intr_en_1_r());
}

u32 mc_gk20a_intr_stall(struct gk20a *g)
{
        return gk20a_readl(g, mc_intr_0_r());
}

u32 mc_gk20a_intr_nonstall(struct gk20a *g)
{
        return gk20a_readl(g, mc_intr_1_r());
}

void gk20a_mc_disable(struct gk20a *g, u32 units)
{
        u32 pmc;

        gk20a_dbg(gpu_dbg_info, "pmc disable: %08x\n", units);

        nvgpu_spinlock_acquire(&g->mc_enable_lock);
        pmc = gk20a_readl(g, mc_enable_r());
        pmc &= ~units;
        gk20a_writel(g, mc_enable_r(), pmc);
        nvgpu_spinlock_release(&g->mc_enable_lock);
}

void gk20a_mc_enable(struct gk20a *g, u32 units)
{
        u32 pmc;

        gk20a_dbg(gpu_dbg_info, "pmc enable: %08x\n", units);

        nvgpu_spinlock_acquire(&g->mc_enable_lock);
        pmc = gk20a_readl(g, mc_enable_r());
        pmc |= units;
        gk20a_writel(g, mc_enable_r(), pmc);
        gk20a_readl(g, mc_enable_r());
        nvgpu_spinlock_release(&g->mc_enable_lock);

        nvgpu_udelay(20);
}

void gk20a_mc_reset(struct gk20a *g, u32 units)
{
        g->ops.mc.disable(g, units);
        if (units & gk20a_fifo_get_all_ce_engine_reset_mask(g))
                nvgpu_udelay(500);
        else
                nvgpu_udelay(20);
        g->ops.mc.enable(g, units);
}

u32 gk20a_mc_boot_0(struct gk20a *g, u32 *arch, u32 *impl, u32 *rev)
{
        u32 val = gk20a_readl(g, mc_boot_0_r());

        if (arch)
                *arch = mc_boot_0_architecture_v(val) <<
                        NVGPU_GPU_ARCHITECTURE_SHIFT;

        if (impl)
                *impl = mc_boot_0_implementation_v(val);

        if (rev)
                *rev = (mc_boot_0_major_revision_v(val) << 4) |
                        mc_boot_0_minor_revision_v(val);

        return val;
}

bool mc_gk20a_is_intr1_pending(struct gk20a *g,
                               enum nvgpu_unit unit, u32 mc_intr_1)
{
        u32 mask = 0;
        bool is_pending;

        switch (unit) {
        case NVGPU_UNIT_FIFO:
                mask = mc_intr_0_pfifo_pending_f();
                break;
        default:
                break;
        }

        if (mask == 0) {
                nvgpu_err(g, "unknown unit %d", unit);
                is_pending = false;
        } else {
                is_pending = (mc_intr_1 & mask) ? true : false;
        }

        return is_pending;
}

void gk20a_init_mc(struct gpu_ops *gops)
{
        gops->mc.intr_enable = mc_gk20a_intr_enable;
        gops->mc.intr_unit_config = mc_gk20a_intr_unit_config;
        gops->mc.isr_stall = mc_gk20a_isr_stall;
        gops->mc.intr_stall = mc_gk20a_intr_stall;
        gops->mc.intr_stall_pause = mc_gk20a_intr_stall_pause;
        gops->mc.intr_stall_resume = mc_gk20a_intr_stall_resume;
        gops->mc.intr_nonstall = mc_gk20a_intr_nonstall;
        gops->mc.intr_nonstall_pause = mc_gk20a_intr_nonstall_pause;
        gops->mc.intr_nonstall_resume = mc_gk20a_intr_nonstall_resume;
        gops->mc.enable = gk20a_mc_enable;
        gops->mc.disable = gk20a_mc_disable;
        gops->mc.reset = gk20a_mc_reset;
        gops->mc.boot_0 = gk20a_mc_boot_0;
        gops->mc.is_intr1_pending = mc_gk20a_is_intr1_pending;
}