diff --git a/drivers/gpu/nvgpu/common/mc/mc.c b/drivers/gpu/nvgpu/common/mc/mc.c
index d1fe3cd69..80cbf2863 100644
--- a/drivers/gpu/nvgpu/common/mc/mc.c
+++ b/drivers/gpu/nvgpu/common/mc/mc.c
@@ -1,7 +1,7 @@
 /*
  * GK20A Master Control
  *
- * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -25,19 +25,6 @@
 #include
 #include
 
-/**
- * cyclic_delta - Returns delta of cyclic integers a and b.
- *
- * @a - First integer
- * @b - Second integer
- *
- * Note: if a is ahead of b, delta is positive.
- */
-static int cyclic_delta(int a, int b)
-{
-	return nvgpu_safe_sub_s32(a, b);
-}
-
 /**
  * nvgpu_wait_for_deferred_interrupts - Wait for interrupts to complete
  *
@@ -48,21 +35,15 @@ static int cyclic_delta(int a, int b)
  */
 void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
 {
-	int stall_irq_threshold = nvgpu_atomic_read(&g->mc.hw_irq_stall_count);
-	int nonstall_irq_threshold =
-		nvgpu_atomic_read(&g->mc.hw_irq_nonstall_count);
-
 	/* wait until all stalling irqs are handled */
 	NVGPU_COND_WAIT(&g->mc.sw_irq_stall_last_handled_cond,
-		cyclic_delta(stall_irq_threshold,
-			nvgpu_atomic_read(&g->mc.sw_irq_stall_last_handled))
-		<= 0, 0U);
+		nvgpu_atomic_read(&g->mc.sw_irq_stall_pending) == 0,
+		0U);
 
 	/* wait until all non-stalling irqs are handled */
 	NVGPU_COND_WAIT(&g->mc.sw_irq_nonstall_last_handled_cond,
-		cyclic_delta(nonstall_irq_threshold,
-			nvgpu_atomic_read(&g->mc.sw_irq_nonstall_last_handled))
-		<= 0, 0U);
+		nvgpu_atomic_read(&g->mc.sw_irq_nonstall_pending) == 0,
+		0U);
 }
 
 void nvgpu_mc_intr_mask(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/include/nvgpu/mc.h b/drivers/gpu/nvgpu/include/nvgpu/mc.h
index 2f48582ac..4292bbcc4 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/mc.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/mc.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -202,20 +202,6 @@ struct nvgpu_mc {
 	 * the deferred interrupts.
 	 */
 
-	/**
-	 * Stalling interrupt counter - incremented on receipt of the stalling
-	 * interrupt in #isr_stall and read in the function
-	 * #nvgpu_wait_for_deferred_interrupts.
-	 */
-	nvgpu_atomic_t hw_irq_stall_count;
-
-	/**
-	 * Non-stalling interrupt counter - incremented on receipt of the
-	 * non-stalling interrupt in #isr_nonstall and read in the function
-	 * #nvgpu_wait_for_deferred_interrupts.
-	 */
-	nvgpu_atomic_t hw_irq_nonstall_count;
-
 	/**
 	 * The condition variable that is signalled upon handling of the
 	 * stalling interrupt. It is wait upon by the function
@@ -227,7 +213,7 @@
-	 * Stalling interrupt status counter - updated on handling of the
-	 * stalling interrupt.
+	 * Stalling interrupt pending flag - set on receipt of the stalling
+	 * interrupt and cleared once it has been handled.
 	 */
-	nvgpu_atomic_t sw_irq_stall_last_handled;
+	nvgpu_atomic_t sw_irq_stall_pending;
 
 	/**
 	 * The condition variable that is signalled upon handling of the
@@ -240,7 +226,7 @@
-	 * Non-stalling interrupt status counter - updated on handling of the
-	 * non-stalling interrupt.
+	 * Non-stalling interrupt pending flag - set on receipt of the
+	 * non-stalling interrupt and cleared once it has been handled.
 	 */
-	nvgpu_atomic_t sw_irq_nonstall_last_handled;
+	nvgpu_atomic_t sw_irq_nonstall_pending;
 
 	/** nvgpu interrupts enabled status from host OS perspective */
 	bool irqs_enabled;
diff --git a/drivers/gpu/nvgpu/os/linux/intr.c b/drivers/gpu/nvgpu/os/linux/intr.c
index cd1f45f4b..976d41e8e 100644
--- a/drivers/gpu/nvgpu/os/linux/intr.c
+++ b/drivers/gpu/nvgpu/os/linux/intr.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -47,7 +47,7 @@ irqreturn_t nvgpu_intr_stall(struct gk20a *g)
 	}
 #endif
 
-	nvgpu_atomic_inc(&g->mc.hw_irq_stall_count);
+	nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 1);
 
 #ifdef CONFIG_NVGPU_TRACE
 	trace_mc_gk20a_intr_stall_done(g->name);
@@ -58,19 +58,16 @@
 
 irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
 {
-	int hw_irq_count;
-
 	nvgpu_log(g, gpu_dbg_intr, "interrupt thread launched");
 
 #ifdef CONFIG_NVGPU_TRACE
 	trace_mc_gk20a_intr_thread_stall(g->name);
 #endif
 
-	hw_irq_count = nvgpu_atomic_read(&g->mc.hw_irq_stall_count);
 	g->ops.mc.isr_stall(g);
-	nvgpu_mc_intr_stall_resume(g);
-	/* sync handled irq counter before re-enabling interrupts */
-	nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, hw_irq_count);
+	/* clear the pending flag before re-enabling interrupts */
+	nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 0);
+	nvgpu_mc_intr_stall_resume(g);
 
 	nvgpu_cond_broadcast(&g->mc.sw_irq_stall_last_handled_cond);
 
@@ -84,7 +81,6 @@ irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
 irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
 {
 	u32 non_stall_intr_val;
-	u32 hw_irq_count;
 	int ops_old, ops_new, ops = 0;
 	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
 
@@ -103,6 +99,7 @@
 	}
 #endif
 
+	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 1);
 	ops = g->ops.mc.isr_nonstall(g);
 	if (ops) {
 		do {
@@ -114,10 +111,8 @@
 		queue_work(l->nonstall_work_queue, &l->nonstall_fn_work);
 	}
 
-	hw_irq_count = nvgpu_atomic_inc_return(&g->mc.hw_irq_nonstall_count);
-
-	/* sync handled irq counter before re-enabling interrupts */
-	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, hw_irq_count);
+	/* clear the pending flag before re-enabling interrupts */
+	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 0);
 
 	nvgpu_mc_intr_nonstall_resume(g);
 
diff --git a/userspace/units/mc/nvgpu-mc.c b/userspace/units/mc/nvgpu-mc.c
index 4589061c1..c5e408da9 100644
--- a/userspace/units/mc/nvgpu-mc.c
+++ b/userspace/units/mc/nvgpu-mc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -769,27 +769,19 @@ int test_wait_for_deferred_interrupts(struct unit_module *m, struct gk20a *g,
 	nvgpu_cond_init(&g->mc.sw_irq_nonstall_last_handled_cond);
 
 	/* immediate completion */
-	nvgpu_atomic_set(&g->mc.hw_irq_stall_count, 0);
-	nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, 0);
-	nvgpu_atomic_set(&g->mc.hw_irq_nonstall_count, 0);
-	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, 0);
+	nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 0);
+	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 0);
 	nvgpu_wait_for_deferred_interrupts(g);
 
 	/* cause timeout */
 	nvgpu_posix_enable_fault_injection(cond_fi, true, 0);
 
 	/* wait on stall until timeout for branch coverage */
-	nvgpu_atomic_set(&g->mc.hw_irq_stall_count, 1);
-	nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, 0);
-	nvgpu_atomic_set(&g->mc.hw_irq_nonstall_count, 0);
-	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, 0);
+	nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 1);
 	nvgpu_wait_for_deferred_interrupts(g);
 
 	/* wait on nonstall until timeout for branch coverage */
-	nvgpu_atomic_set(&g->mc.hw_irq_stall_count, 0);
-	nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, 0);
-	nvgpu_atomic_set(&g->mc.hw_irq_nonstall_count, 1);
-	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, 0);
+	nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 1);
 	nvgpu_wait_for_deferred_interrupts(g);
 
 	return UNIT_SUCCESS;
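The handshake this series moves to can be modelled outside the driver: the hard ISR marks an interrupt pending, the deferred handler clears the flag and broadcasts a condition variable, and nvgpu_wait_for_deferred_interrupts() simply waits for the flag to read zero. The sketch below is a minimal userspace illustration of that handshake, not nvgpu code; irq_pending, irq_handled_cond and the pthread plumbing are hypothetical stand-ins for sw_irq_stall_pending, sw_irq_stall_last_handled_cond and the NVGPU_COND_*/nvgpu_atomic_* helpers.

/* Illustrative model of the pending-flag handshake (build with -pthread). */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t irq_handled_cond = PTHREAD_COND_INITIALIZER;
static int irq_pending;

/* Hard IRQ half: equivalent of setting sw_irq_*_pending = 1 in the ISR. */
static void hard_isr(void)
{
	pthread_mutex_lock(&lock);
	irq_pending = 1;
	pthread_mutex_unlock(&lock);
}

/* Deferred half: handle the interrupt, clear the flag, wake any waiter. */
static void *threaded_isr(void *arg)
{
	(void)arg;
	usleep(1000);		/* stand-in for g->ops.mc.isr_stall(g) */
	pthread_mutex_lock(&lock);
	irq_pending = 0;	/* sw_irq_*_pending = 0 before resuming intrs */
	pthread_cond_broadcast(&irq_handled_cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Waiter: what nvgpu_wait_for_deferred_interrupts() reduces to. */
static void wait_for_deferred_interrupts(void)
{
	pthread_mutex_lock(&lock);
	while (irq_pending != 0)
		pthread_cond_wait(&irq_handled_cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t thread;

	hard_isr();
	pthread_create(&thread, NULL, threaded_isr, NULL);
	wait_for_deferred_interrupts();
	pthread_join(&thread, NULL);
	printf("deferred interrupt handled\n");
	return 0;
}

A single flag suffices because the waiter only needs to know whether an interrupt received before the wait is still unhandled, which appears to be why the hw_irq_*_count / sw_irq_*_last_handled counter pair and cyclic_delta() can be dropped.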