Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 10:34:43 +03:00)
nvgpu: gpu: simplify waiting logic for interrupt handler
The atomic counter in the interrupt handler can overflow and result in a call to BUG(), which will crash the process. The equivalent functionality can be implemented by simply setting an atomic variable at the start of the handler and resetting it at the end. The wait can be longer if interrupts keep arriving, but it will ultimately end. The wait path is generally not time critical, so this should not be an issue.

Also, fix the unit tests for mc.

Change-Id: I9b8a236f72e057e89a969d2e98d4d3f9be81b379
Signed-off-by: shashank singh <shashsingh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2247819
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
committed by Alex Waterman
parent bd5604bba7
commit d34bad0a27
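For context, the pattern this change adopts can be sketched in a few lines. The sketch below is a standalone analogue in plain C11 atomics and pthreads, not the nvgpu primitives; names such as irq_pending, irq_handler, and wait_for_deferred_irqs are hypothetical stand-ins for sw_irq_*_pending, the ISR entry points, and nvgpu_wait_for_deferred_interrupts. The handler sets a pending flag on entry and clears it (then broadcasts) on exit; a waiter blocks on a condition variable until the flag reads zero. Unlike the old hw_irq_*_count counters, a flag cannot overflow, so the cyclic-delta arithmetic that could trip BUG() disappears entirely.

/*
 * Minimal analogue of the new scheme (illustrative only; these names are
 * hypothetical, not nvgpu APIs). Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int irq_pending;          /* plays the role of sw_irq_stall_pending */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t handled = PTHREAD_COND_INITIALIZER;

/* Handler: flag work in flight, service it, then clear the flag and wake
 * waiters. Setting and clearing a flag cannot overflow, unlike a counter. */
static void *irq_handler(void *arg)
{
        (void)arg;
        atomic_store(&irq_pending, 1);          /* set at start of handler */
        /* ... service the interrupt here ... */
        pthread_mutex_lock(&lock);
        atomic_store(&irq_pending, 0);          /* reset at end of handler */
        pthread_cond_broadcast(&handled);       /* wake any waiters */
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* Waiter: block until no handler is in flight. If interrupts keep firing,
 * the flag keeps getting re-set and the wait lasts longer, but it ends. */
static void wait_for_deferred_irqs(void)
{
        pthread_mutex_lock(&lock);
        while (atomic_load(&irq_pending) != 0)
                pthread_cond_wait(&handled, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t t;

        atomic_store(&irq_pending, 1);  /* pretend an irq was just raised */
        pthread_create(&t, NULL, irq_handler, NULL);
        wait_for_deferred_irqs();       /* returns once the handler finishes */
        pthread_join(t, NULL);
        puts("all deferred interrupts handled");
        return 0;
}

The trade-off matches the commit message: under a constant interrupt stream the flag can be re-set before a waiter observes it clear, so the wait may stretch, but it terminates once interrupts quiesce, and the wait path is not time critical.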
@@ -1,7 +1,7 @@
 /*
  * GK20A Master Control
  *
- * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,19 +25,6 @@
 #include <nvgpu/mc.h>
 #include <nvgpu/gk20a.h>
 
-/**
- * cyclic_delta - Returns delta of cyclic integers a and b.
- *
- * @a - First integer
- * @b - Second integer
- *
- * Note: if a is ahead of b, delta is positive.
- */
-static int cyclic_delta(int a, int b)
-{
-        return nvgpu_safe_sub_s32(a, b);
-}
-
 /**
  * nvgpu_wait_for_deferred_interrupts - Wait for interrupts to complete
  *
@@ -48,21 +35,15 @@ static int cyclic_delta(int a, int b)
  */
 void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
 {
-        int stall_irq_threshold = nvgpu_atomic_read(&g->mc.hw_irq_stall_count);
-        int nonstall_irq_threshold =
-                nvgpu_atomic_read(&g->mc.hw_irq_nonstall_count);
-
         /* wait until all stalling irqs are handled */
         NVGPU_COND_WAIT(&g->mc.sw_irq_stall_last_handled_cond,
-                cyclic_delta(stall_irq_threshold,
-                        nvgpu_atomic_read(&g->mc.sw_irq_stall_last_handled))
-                <= 0, 0U);
+                nvgpu_atomic_read(&g->mc.sw_irq_stall_pending) == 0,
+                0U);
 
         /* wait until all non-stalling irqs are handled */
         NVGPU_COND_WAIT(&g->mc.sw_irq_nonstall_last_handled_cond,
-                cyclic_delta(nonstall_irq_threshold,
-                        nvgpu_atomic_read(&g->mc.sw_irq_nonstall_last_handled))
-                <= 0, 0U);
+                nvgpu_atomic_read(&g->mc.sw_irq_nonstall_pending) == 0,
+                0U);
 }
 
 void nvgpu_mc_intr_mask(struct gk20a *g)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -202,20 +202,6 @@ struct nvgpu_mc {
          * the deferred interrupts.
          */
 
-        /**
-         * Stalling interrupt counter - incremented on receipt of the stalling
-         * interrupt in #isr_stall and read in the function
-         * #nvgpu_wait_for_deferred_interrupts.
-         */
-        nvgpu_atomic_t hw_irq_stall_count;
-
-        /**
-         * Non-stalling interrupt counter - incremented on receipt of the
-         * non-stalling interrupt in #isr_nonstall and read in the function
-         * #nvgpu_wait_for_deferred_interrupts.
-         */
-        nvgpu_atomic_t hw_irq_nonstall_count;
-
         /**
          * The condition variable that is signalled upon handling of the
          * stalling interrupt. It is wait upon by the function
@@ -227,7 +213,7 @@ struct nvgpu_mc {
          * Stalling interrupt status counter - updated on handling of the
          * stalling interrupt.
          */
-        nvgpu_atomic_t sw_irq_stall_last_handled;
+        nvgpu_atomic_t sw_irq_stall_pending;
 
         /**
          * The condition variable that is signalled upon handling of the
@@ -240,7 +226,7 @@ struct nvgpu_mc {
          * Non-stalling interrupt status counter - updated on handling of the
          * non-stalling interrupt.
          */
-        nvgpu_atomic_t sw_irq_nonstall_last_handled;
+        nvgpu_atomic_t sw_irq_nonstall_pending;
 
         /** nvgpu interrupts enabled status from host OS perspective */
         bool irqs_enabled;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -47,7 +47,7 @@ irqreturn_t nvgpu_intr_stall(struct gk20a *g)
         }
 #endif
 
-        nvgpu_atomic_inc(&g->mc.hw_irq_stall_count);
+        nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 1);
 
 #ifdef CONFIG_NVGPU_TRACE
         trace_mc_gk20a_intr_stall_done(g->name);
@@ -58,19 +58,16 @@ irqreturn_t nvgpu_intr_stall(struct gk20a *g)
 
 irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
 {
-        int hw_irq_count;
-
         nvgpu_log(g, gpu_dbg_intr, "interrupt thread launched");
 
 #ifdef CONFIG_NVGPU_TRACE
         trace_mc_gk20a_intr_thread_stall(g->name);
 #endif
 
-        hw_irq_count = nvgpu_atomic_read(&g->mc.hw_irq_stall_count);
         g->ops.mc.isr_stall(g);
-        nvgpu_mc_intr_stall_resume(g);
         /* sync handled irq counter before re-enabling interrupts */
-        nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, hw_irq_count);
+        nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 0);
+        nvgpu_mc_intr_stall_resume(g);
 
         nvgpu_cond_broadcast(&g->mc.sw_irq_stall_last_handled_cond);
 
@@ -84,7 +81,6 @@ irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
 irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
 {
         u32 non_stall_intr_val;
-        u32 hw_irq_count;
         int ops_old, ops_new, ops = 0;
         struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
 
@@ -103,6 +99,7 @@ irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
         }
 #endif
 
+        nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 1);
         ops = g->ops.mc.isr_nonstall(g);
         if (ops) {
                 do {
@@ -114,10 +111,8 @@ irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
                 queue_work(l->nonstall_work_queue, &l->nonstall_fn_work);
         }
 
-        hw_irq_count = nvgpu_atomic_inc_return(&g->mc.hw_irq_nonstall_count);
-
         /* sync handled irq counter before re-enabling interrupts */
-        nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, hw_irq_count);
+        nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 0);
 
         nvgpu_mc_intr_nonstall_resume(g);
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -769,27 +769,19 @@ int test_wait_for_deferred_interrupts(struct unit_module *m, struct gk20a *g,
         nvgpu_cond_init(&g->mc.sw_irq_nonstall_last_handled_cond);
 
         /* immediate completion */
-        nvgpu_atomic_set(&g->mc.hw_irq_stall_count, 0);
-        nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, 0);
-        nvgpu_atomic_set(&g->mc.hw_irq_nonstall_count, 0);
-        nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, 0);
+        nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 0);
+        nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 0);
         nvgpu_wait_for_deferred_interrupts(g);
 
         /* cause timeout */
         nvgpu_posix_enable_fault_injection(cond_fi, true, 0);
 
         /* wait on stall until timeout for branch coverage */
-        nvgpu_atomic_set(&g->mc.hw_irq_stall_count, 1);
-        nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, 0);
-        nvgpu_atomic_set(&g->mc.hw_irq_nonstall_count, 0);
-        nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, 0);
+        nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 1);
         nvgpu_wait_for_deferred_interrupts(g);
 
         /* wait on nonstall until timeout for branch coverage */
-        nvgpu_atomic_set(&g->mc.hw_irq_stall_count, 0);
-        nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, 0);
-        nvgpu_atomic_set(&g->mc.hw_irq_nonstall_count, 1);
-        nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, 0);
+        nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 1);
         nvgpu_wait_for_deferred_interrupts(g);
 
         return UNIT_SUCCESS;