gpu: nvgpu: Move interrupt ISR code to common

This is one of the steps in restructuring the interrupt code.
- Move the ISR logic to common code. This will allow us to add mixed
ASIL error handling levels.
- Modify the nonstall ISR to use threaded interrupts. The bottom half
of the nonstall ISR now runs the nonstall operations directly instead
of queuing work to a workqueue (see the sketch below).
- Remove the nonstall workqueue implementation.
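
For reference, the resulting top half / bottom half pattern on the
Linux side, abbreviated from the glue code in this patch:

    /* Top half: quick pending check, then wake the IRQ thread. */
    static irqreturn_t gk20a_intr_isr_nonstall(int irq, void *dev_id)
    {
        struct gk20a *g = dev_id;
        u32 err = nvgpu_intr_nonstall_isr(g);

        return err == NVGPU_INTR_HANDLE ? IRQ_WAKE_THREAD : IRQ_NONE;
    }

    /* Bottom half: runs in the IRQ thread, performs the nonstall ops. */
    static irqreturn_t gk20a_intr_thread_isr_nonstall(int irq, void *dev_id)
    {
        struct gk20a *g = dev_id;

        nvgpu_intr_nonstall_handle(g);
        return IRQ_HANDLED;
    }

    /* Both halves are registered on the same interrupt line. */
    err = devm_request_threaded_irq(&dev->dev,
                    l->interrupts.nonstall_line,
                    gk20a_intr_isr_nonstall,
                    gk20a_intr_thread_isr_nonstall,
                    0, "gk20a_nonstall", gk20a);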

JIRA NVGPU-6351

Change-Id: I5f891b0de4b0c34f6ac05522a5da08dc36221aa6
Signed-off-by: Vedashree Vidwans <vvidwans@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2467713
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Vedashree Vidwans
2021-01-07 21:55:47 -08:00
committed by mobile promotions
parent ecfd675d9b
commit e445b57b04
8 changed files with 269 additions and 172 deletions

View File

@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2019-2020, NVIDIA CORPORATION. All Rights Reserved.
+# Copyright (c) 2019-2021, NVIDIA CORPORATION. All Rights Reserved.
 #
 # Linux elements and units in nvgpu.
 #
@@ -99,7 +99,7 @@ fuse:
   sources: [ os/linux/fuse.c ]
 intr:
-  sources: [ os/linux/intr.c, os/linux/intr.h ]
+  sources: [ os/linux/intr.c ]
 io:
   sources: [ os/linux/io_usermode.c,

View File

@@ -1,7 +1,7 @@
 /*
  * GK20A Master Interrupt Control
  *
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,6 +25,8 @@
 #include <nvgpu/mc.h>
 #include <nvgpu/gk20a.h>
 #include <nvgpu/bug.h>
+#include <nvgpu/nvgpu_init.h>
+#include <nvgpu/trace.h>
 
 void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
 {
@@ -123,3 +125,111 @@ void nvgpu_mc_intr_nonstall_resume(struct gk20a *g)
 	g->ops.mc.intr_nonstall_resume(g);
 	nvgpu_spinunlock_irqrestore(&g->mc.intr_lock, flags);
 }
+
+static void nvgpu_intr_nonstall_work(struct gk20a *g, u32 work_ops)
+{
+	bool semaphore_wakeup, post_events;
+
+	semaphore_wakeup =
+		(((work_ops & NVGPU_NONSTALL_OPS_WAKEUP_SEMAPHORE) != 0U) ?
+					true : false);
+	post_events = (((work_ops & NVGPU_NONSTALL_OPS_POST_EVENTS) != 0U) ?
+					true : false);
+
+	if (semaphore_wakeup) {
+		g->ops.semaphore_wakeup(g, post_events);
+	}
+}
+
+u32 nvgpu_intr_nonstall_isr(struct gk20a *g)
+{
+	u32 non_stall_intr_val = 0U;
+
+	if (nvgpu_is_powered_off(g)) {
+		return NVGPU_INTR_UNMASK;
+	}
+
+	/* not from gpu when sharing irq with others */
+	non_stall_intr_val = g->ops.mc.intr_nonstall(g);
+	if (non_stall_intr_val == 0U) {
+		return NVGPU_INTR_NONE;
+	}
+
+	nvgpu_mc_intr_nonstall_pause(g);
+
+	if (g->sw_quiesce_pending) {
+		return NVGPU_INTR_QUIESCE_PENDING;
+	}
+
+	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 1);
+
+	return NVGPU_INTR_HANDLE;
+}
+
+void nvgpu_intr_nonstall_handle(struct gk20a *g)
+{
+	int err;
+	u32 nonstall_ops = 0;
+
+	nonstall_ops = g->ops.mc.isr_nonstall(g);
+	if (nonstall_ops != 0U) {
+		nvgpu_intr_nonstall_work(g, nonstall_ops);
+	}
+
+	/* sync handled irq counter before re-enabling interrupts */
+	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 0);
+
+	nvgpu_mc_intr_nonstall_resume(g);
+
+	err = nvgpu_cond_broadcast(&g->mc.sw_irq_nonstall_last_handled_cond);
+	if (err != 0) {
+		nvgpu_err(g, "nvgpu_cond_broadcast failed err=%d", err);
+	}
+}
+
+u32 nvgpu_intr_stall_isr(struct gk20a *g)
+{
+	u32 mc_intr_0 = 0U;
+
+	nvgpu_trace_intr_stall_start(g);
+
+	if (nvgpu_is_powered_off(g)) {
+		return NVGPU_INTR_UNMASK;
+	}
+
+	/* not from gpu when sharing irq with others */
+	mc_intr_0 = g->ops.mc.intr_stall(g);
+	if (mc_intr_0 == 0U) {
+		return NVGPU_INTR_NONE;
+	}
+
+	nvgpu_mc_intr_stall_pause(g);
+
+	if (g->sw_quiesce_pending) {
+		return NVGPU_INTR_QUIESCE_PENDING;
+	}
+
+	nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 1);
+
+	nvgpu_trace_intr_stall_done(g);
+
+	return NVGPU_INTR_HANDLE;
+}
+
+void nvgpu_intr_stall_handle(struct gk20a *g)
+{
+	int err;
+
+	nvgpu_trace_intr_thread_stall_start(g);
+
+	g->ops.mc.isr_stall(g);
+
+	nvgpu_trace_intr_thread_stall_done(g);
+
+	/* sync handled irq counter before re-enabling interrupts */
+	nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 0);
+
+	nvgpu_mc_intr_stall_resume(g);
+
+	err = nvgpu_cond_broadcast(&g->mc.sw_irq_stall_last_handled_cond);
+	if (err != 0) {
+		nvgpu_err(g, "nvgpu_cond_broadcast failed err=%d", err);
+	}
+}

View File

@@ -163,6 +163,34 @@ struct nvgpu_device;
 /** Bit offset of the Architecture field in the HW version register */
 #define NVGPU_GPU_ARCHITECTURE_SHIFT 4U
 
+/**
+ * @defgroup NVGPU_MC_INTR_PENDING_DEFINES
+ *
+ * Defines of all MC unit interrupt pending scenarios.
+ */
+
+/**
+ * @ingroup NVGPU_MC_INTR_PENDING_DEFINES
+ * Indicates that pending interrupts should be handled in the ISR thread.
+ */
+#define NVGPU_INTR_HANDLE		0U
+
+/**
+ * @ingroup NVGPU_MC_INTR_PENDING_DEFINES
+ * Indicates that pending interrupts are erroneous and should be cleared.
+ */
+#define NVGPU_INTR_UNMASK		BIT32(0)
+
+/**
+ * @ingroup NVGPU_MC_INTR_PENDING_DEFINES
+ * Indicates that there are no pending interrupts.
+ */
+#define NVGPU_INTR_NONE			BIT32(1)
+
+/**
+ * @ingroup NVGPU_MC_INTR_PENDING_DEFINES
+ * Indicates that the quiesce state is pending. This means there is no need
+ * to handle pending interrupts (if any), as the driver will enter the
+ * quiesce state.
+ */
+#define NVGPU_INTR_QUIESCE_PENDING	BIT32(2)
+
 /**
  * @defgroup NVGPU_MC_INTR_TYPE_DEFINES
  *
@@ -590,4 +618,57 @@ int nvgpu_mc_reset_dev(struct gk20a *g, const struct nvgpu_device *dev);
  */
 int nvgpu_mc_reset_devtype(struct gk20a *g, u32 devtype);
 
+/**
+ * @brief Top half of the stall interrupt ISR.
+ *
+ * @param g [in] The GPU driver struct.
+ *
+ * This function is invoked by the stall interrupt ISR to check whether any
+ * stall interrupts are pending. It returns the action to be taken based on
+ * the stall interrupt, GPU power and quiesce status.
+ *
+ * @retval NVGPU_INTR_HANDLE if stall interrupts are pending.
+ * @retval NVGPU_INTR_UNMASK if the GPU is powered off.
+ * @retval NVGPU_INTR_NONE if none of the stall interrupts are pending.
+ * @retval NVGPU_INTR_QUIESCE_PENDING if quiesce is pending.
+ */
+u32 nvgpu_intr_stall_isr(struct gk20a *g);
+
+/**
+ * @brief Bottom half of the stall interrupt ISR.
+ *
+ * @param g [in] The GPU driver struct.
+ *
+ * This function is called to take action on pending stall interrupts. The
+ * unit ISR functions are invoked based on the triggered stall interrupts.
+ */
+void nvgpu_intr_stall_handle(struct gk20a *g);
+
+/**
+ * @brief Top half of the nonstall interrupt ISR.
+ *
+ * @param g [in] The GPU driver struct.
+ *
+ * This function is invoked by the nonstall interrupt ISR to check whether
+ * any nonstall interrupts are pending. It returns the action to be taken
+ * based on the nonstall interrupt, GPU power and quiesce status.
+ *
+ * @retval NVGPU_INTR_HANDLE if nonstall interrupts are pending.
+ * @retval NVGPU_INTR_UNMASK if the GPU is powered off.
+ * @retval NVGPU_INTR_NONE if none of the nonstall interrupts are pending.
+ * @retval NVGPU_INTR_QUIESCE_PENDING if quiesce is pending.
+ */
+u32 nvgpu_intr_nonstall_isr(struct gk20a *g);
+
+/**
+ * @brief Bottom half of the nonstall interrupt ISR.
+ *
+ * @param g [in] The GPU driver struct.
+ *
+ * This function is called to take action on pending nonstall interrupts.
+ * Based on the triggered nonstall interrupts, it invokes the nonstall
+ * operations.
+ */
+void nvgpu_intr_nonstall_handle(struct gk20a *g);
+
 #endif

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -23,12 +23,38 @@
 #ifndef NVGPU_TRACE_H
 #define NVGPU_TRACE_H
 
-#ifdef CONFIG_NVGPU_TRACE
+struct gk20a;
+
 #ifdef __KERNEL__
+#ifdef CONFIG_NVGPU_TRACE
 #include <trace/events/gk20a.h>
+#endif
+
+void nvgpu_trace_intr_stall_start(struct gk20a *g);
+void nvgpu_trace_intr_stall_done(struct gk20a *g);
+void nvgpu_trace_intr_thread_stall_start(struct gk20a *g);
+void nvgpu_trace_intr_thread_stall_done(struct gk20a *g);
+
+#elif defined(__NVGPU_POSIX__)
+#ifdef CONFIG_NVGPU_TRACE
+#include <nvgpu/posix/trace_gk20a.h>
+#endif /* CONFIG_NVGPU_TRACE */
+
+static inline void nvgpu_trace_intr_stall_start(struct gk20a *g) {}
+static inline void nvgpu_trace_intr_stall_done(struct gk20a *g) {}
+static inline void nvgpu_trace_intr_thread_stall_start(struct gk20a *g) {}
+static inline void nvgpu_trace_intr_thread_stall_done(struct gk20a *g) {}
+
 #else
+#ifdef CONFIG_NVGPU_TRACE
 #include <nvgpu/posix/trace_gk20a.h>
 #endif
-#endif
+static inline void nvgpu_trace_intr_stall_start(struct gk20a *g) {}
+static inline void nvgpu_trace_intr_stall_done(struct gk20a *g) {}
+void nvgpu_trace_intr_thread_stall_start(struct gk20a *g);
+void nvgpu_trace_intr_thread_stall_done(struct gk20a *g);
+#endif /* __KERNEL__ */
 
 #endif /* NVGPU_TRACE_H */

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -12,133 +12,34 @@
  */
 
 #include <nvgpu/trace.h>
-#include <linux/irqreturn.h>
 
 #include <nvgpu/gk20a.h>
-#include <nvgpu/mc.h>
-#include <nvgpu/nvgpu_init.h>
-#include <nvgpu/atomic.h>
-
-#include "os_linux.h"
-
-irqreturn_t nvgpu_intr_stall(struct gk20a *g)
+void nvgpu_trace_intr_thread_stall_start(struct gk20a *g)
 {
-	u32 mc_intr_0;
-
-#ifdef CONFIG_NVGPU_TRACE
-	trace_mc_gk20a_intr_stall(g->name);
-#endif
-
-	if (nvgpu_is_powered_off(g))
-		return IRQ_NONE;
-
-	/* not from gpu when sharing irq with others */
-	mc_intr_0 = g->ops.mc.intr_stall(g);
-	if (unlikely(!mc_intr_0))
-		return IRQ_NONE;
-
-	nvgpu_mc_intr_stall_pause(g);
-
-	if (g->sw_quiesce_pending) {
-		return IRQ_NONE;
-	}
-
-	nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 1);
-
-#ifdef CONFIG_NVGPU_TRACE
-	trace_mc_gk20a_intr_stall_done(g->name);
-#endif
-
-	return IRQ_WAKE_THREAD;
-}
-
-irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
-{
-	nvgpu_log(g, gpu_dbg_intr, "interrupt thread launched");
-
 #ifdef CONFIG_NVGPU_TRACE
 	trace_mc_gk20a_intr_thread_stall(g->name);
 #endif
-
-	g->ops.mc.isr_stall(g);
-
-	/* sync handled irq counter before re-enabling interrupts */
-	nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 0);
-
-	nvgpu_mc_intr_stall_resume(g);
-	nvgpu_cond_broadcast(&g->mc.sw_irq_stall_last_handled_cond);
-
+}
+
+void nvgpu_trace_intr_thread_stall_done(struct gk20a *g)
+{
 #ifdef CONFIG_NVGPU_TRACE
 	trace_mc_gk20a_intr_thread_stall_done(g->name);
 #endif
-
-	return IRQ_HANDLED;
 }
 
-irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
+void nvgpu_trace_intr_stall_start(struct gk20a *g)
 {
-	u32 non_stall_intr_val;
-	int ops_old, ops_new, ops = 0;
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-
-	if (nvgpu_is_powered_off(g))
-		return IRQ_NONE;
-
-	/* not from gpu when sharing irq with others */
-	non_stall_intr_val = g->ops.mc.intr_nonstall(g);
-	if (unlikely(!non_stall_intr_val))
-		return IRQ_NONE;
-
-	nvgpu_mc_intr_nonstall_pause(g);
-
-	if (g->sw_quiesce_pending) {
-		return IRQ_NONE;
-	}
-
-	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 1);
-
-	ops = g->ops.mc.isr_nonstall(g);
-	if (ops) {
-		do {
-			ops_old = atomic_read(&l->nonstall_ops);
-			ops_new = ops_old | ops;
-		} while (ops_old != atomic_cmpxchg(&l->nonstall_ops,
-						ops_old, ops_new));
-		queue_work(l->nonstall_work_queue, &l->nonstall_fn_work);
-	}
-
-	/* sync handled irq counter before re-enabling interrupts */
-	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 0);
-
-	nvgpu_mc_intr_nonstall_resume(g);
-	nvgpu_cond_broadcast(&g->mc.sw_irq_nonstall_last_handled_cond);
-
-	return IRQ_HANDLED;
+#ifdef CONFIG_NVGPU_TRACE
+	trace_mc_gk20a_intr_stall(g->name);
+#endif
 }
 
-static void mc_gk20a_handle_intr_nonstall(struct gk20a *g, u32 ops)
+void nvgpu_trace_intr_stall_done(struct gk20a *g)
 {
-	bool semaphore_wakeup, post_events;
-
-	semaphore_wakeup =
-		(((ops & NVGPU_NONSTALL_OPS_WAKEUP_SEMAPHORE) != 0U) ?
-					true : false);
-	post_events = (((ops & NVGPU_NONSTALL_OPS_POST_EVENTS) != 0U) ?
-					true: false);
-
-	if (semaphore_wakeup) {
-		g->ops.semaphore_wakeup(g, post_events);
-	}
-}
-
-void nvgpu_intr_nonstall_cb(struct work_struct *work)
-{
-	struct nvgpu_os_linux *l =
-		container_of(work, struct nvgpu_os_linux, nonstall_fn_work);
-	struct gk20a *g = &l->g;
-
-	do {
-		u32 ops;
-
-		ops = atomic_xchg(&l->nonstall_ops, 0);
-		mc_gk20a_handle_intr_nonstall(g, ops);
-	} while (atomic_read(&l->nonstall_ops) != 0);
+#ifdef CONFIG_NVGPU_TRACE
+	trace_mc_gk20a_intr_stall_done(g->name);
+#endif
 }

View File

@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-#ifndef __NVGPU_LINUX_INTR_H__
-#define __NVGPU_LINUX_INTR_H__
-
-struct gk20a;
-
-irqreturn_t nvgpu_intr_stall(struct gk20a *g);
-irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g);
-irqreturn_t nvgpu_intr_nonstall(struct gk20a *g);
-void nvgpu_intr_nonstall_cb(struct work_struct *work);
-#endif

View File

@@ -70,7 +70,6 @@
#include "module.h" #include "module.h"
#include "module_usermode.h" #include "module_usermode.h"
#include "intr.h"
#include "ioctl.h" #include "ioctl.h"
#include "ioctl_ctrl.h" #include "ioctl_ctrl.h"
@@ -437,13 +436,6 @@ int gk20a_pm_finalize_poweron(struct device *dev)
 	nvgpu_restore_usermode_for_poweron(g);
 
-	/* Enable interrupt workqueue */
-	if (!l->nonstall_work_queue) {
-		l->nonstall_work_queue = alloc_workqueue("%s",
-						WQ_HIGHPRI, 1, "mc_nonstall");
-		INIT_WORK(&l->nonstall_fn_work, nvgpu_intr_nonstall_cb);
-	}
-
 	err = nvgpu_detect_chip(g);
 	if (err)
 		goto done;
@@ -931,22 +923,33 @@ u64 nvgpu_resource_addr(struct platform_device *dev, int i)
 static irqreturn_t gk20a_intr_isr_stall(int irq, void *dev_id)
 {
 	struct gk20a *g = dev_id;
+	u32 err = nvgpu_intr_stall_isr(g);
 
-	return nvgpu_intr_stall(g);
+	return err == NVGPU_INTR_HANDLE ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+static irqreturn_t gk20a_intr_thread_isr_stall(int irq, void *dev_id)
+{
+	struct gk20a *g = dev_id;
+
+	nvgpu_intr_stall_handle(g);
+	return IRQ_HANDLED;
 }
 
 static irqreturn_t gk20a_intr_isr_nonstall(int irq, void *dev_id)
 {
 	struct gk20a *g = dev_id;
+	u32 err = nvgpu_intr_nonstall_isr(g);
 
-	return nvgpu_intr_nonstall(g);
+	return err == NVGPU_INTR_HANDLE ? IRQ_WAKE_THREAD : IRQ_NONE;
 }
 
-static irqreturn_t gk20a_intr_thread_stall(int irq, void *dev_id)
+static irqreturn_t gk20a_intr_thread_isr_nonstall(int irq, void *dev_id)
 {
 	struct gk20a *g = dev_id;
 
-	return nvgpu_intr_thread_stall(g);
+	nvgpu_intr_nonstall_handle(g);
+	return IRQ_HANDLED;
 }
 
 void gk20a_remove_support(struct gk20a *g)
@@ -1495,8 +1498,6 @@ out:
  */
 void gk20a_driver_start_unload(struct gk20a *g)
 {
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-
 	nvgpu_log(g, gpu_dbg_shutdown, "Driver is now going down!\n");
 
 	nvgpu_start_gpu_idle(g);
@@ -1507,12 +1508,6 @@ void gk20a_driver_start_unload(struct gk20a *g)
 	nvgpu_wait_for_idle(g);
 
 	nvgpu_wait_for_deferred_interrupts(g);
-
-	if (l->nonstall_work_queue) {
-		cancel_work_sync(&l->nonstall_fn_work);
-		destroy_workqueue(l->nonstall_work_queue);
-		l->nonstall_work_queue = NULL;
-	}
 }
 
 static inline void set_gk20a(struct platform_device *pdev, struct gk20a *gk20a)
@@ -1660,7 +1655,7 @@ static int gk20a_probe(struct platform_device *dev)
 		err = devm_request_threaded_irq(&dev->dev,
 				l->interrupts.stall_lines[i],
 				gk20a_intr_isr_stall,
-				gk20a_intr_thread_stall,
+				gk20a_intr_thread_isr_stall,
 				0, "gk20a_stall", gk20a);
 		if (err) {
 			dev_err(&dev->dev,
@@ -1671,9 +1666,10 @@ static int gk20a_probe(struct platform_device *dev)
 		}
 	}
 
 	if (l->interrupts.nonstall_size > 0) {
-		err = devm_request_irq(&dev->dev,
+		err = devm_request_threaded_irq(&dev->dev,
 				l->interrupts.nonstall_line,
 				gk20a_intr_isr_nonstall,
+				gk20a_intr_thread_isr_nonstall,
 				0, "gk20a_nonstall", gk20a);
 		if (err) {
 			dev_err(&dev->dev,

View File

@@ -23,6 +23,7 @@
 #include <nvgpu/nvhost.h>
 #include <nvgpu/nvgpu_common.h>
 #include <nvgpu/kmem.h>
+#include <nvgpu/mc.h>
 #include <nvgpu/enabled.h>
 #include <nvgpu/nvlink_probe.h>
 #include <nvgpu/soc.h>
@@ -34,7 +35,6 @@
#include "nvlink.h" #include "nvlink.h"
#include "module.h" #include "module.h"
#include "intr.h"
#include "sysfs.h" #include "sysfs.h"
#include "os_linux.h" #include "os_linux.h"
#include "platform_gk20a.h" #include "platform_gk20a.h"
@@ -323,11 +323,8 @@ static struct pci_device_id nvgpu_pci_table[] = {
 static irqreturn_t nvgpu_pci_isr(int irq, void *dev_id)
 {
 	struct gk20a *g = dev_id;
-	irqreturn_t ret_stall;
-	irqreturn_t ret_nonstall;
-
-	ret_stall = nvgpu_intr_stall(g);
-	ret_nonstall = nvgpu_intr_nonstall(g);
+	u32 ret_stall = nvgpu_intr_stall_isr(g);
+	u32 ret_nonstall = nvgpu_intr_nonstall_isr(g);
 
 #if defined(CONFIG_PCI_MSI)
 	/* Send MSI EOI */
@@ -335,14 +332,22 @@ static irqreturn_t nvgpu_pci_isr(int irq, void *dev_id)
 		g->ops.xve.rearm_msi(g);
 #endif
 
-	return (ret_stall == IRQ_NONE) ? ret_nonstall : IRQ_WAKE_THREAD;
+	if ((ret_stall == NVGPU_INTR_HANDLE) ||
+	    (ret_nonstall == NVGPU_INTR_HANDLE)) {
+		return IRQ_WAKE_THREAD;
+	}
+
+	return IRQ_NONE;
 }
 
 static irqreturn_t nvgpu_pci_intr_thread(int irq, void *dev_id)
 {
 	struct gk20a *g = dev_id;
 
-	return nvgpu_intr_thread_stall(g);
+	nvgpu_intr_stall_handle(g);
+	nvgpu_intr_nonstall_handle(g);
+	return IRQ_HANDLED;
 }
static int nvgpu_pci_init_support(struct pci_dev *pdev) static int nvgpu_pci_init_support(struct pci_dev *pdev)