nvgpu: gpu: simplify waiting logic for interrupt handler

The atomic counter in the interrupt handler can overflow, resulting in a
call to BUG() which will crash the process. The equivalent functionality
can be implemented by simply setting an atomic variable at the start of
the handler and resetting it at the end. The wait can take longer if
interrupts arrive continuously, but it will ultimately end. Generally
the wait path is not time critical, so this should not be an issue.
Also, fix the unit tests for mc.

Change-Id: I9b8a236f72e057e89a969d2e98d4d3f9be81b379
Signed-off-by: shashank singh <shashsingh@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2247819
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
This commit is contained in:
shashank singh
2019-12-17 17:19:17 +05:30
committed by Alex Waterman
parent bd5604bba7
commit d34bad0a27
4 changed files with 19 additions and 65 deletions

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -769,27 +769,19 @@ int test_wait_for_deferred_interrupts(struct unit_module *m, struct gk20a *g,
nvgpu_cond_init(&g->mc.sw_irq_nonstall_last_handled_cond);
/* immediate completion */
nvgpu_atomic_set(&g->mc.hw_irq_stall_count, 0);
nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, 0);
nvgpu_atomic_set(&g->mc.hw_irq_nonstall_count, 0);
nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, 0);
nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 0);
nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 0);
nvgpu_wait_for_deferred_interrupts(g);
/* cause timeout */
nvgpu_posix_enable_fault_injection(cond_fi, true, 0);
/* wait on stall until timeout for branch coverage */
nvgpu_atomic_set(&g->mc.hw_irq_stall_count, 1);
nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, 0);
nvgpu_atomic_set(&g->mc.hw_irq_nonstall_count, 0);
nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, 0);
nvgpu_atomic_set(&g->mc.sw_irq_stall_pending, 1);
nvgpu_wait_for_deferred_interrupts(g);
/* wait on nonstall until timeout for branch coverage */
nvgpu_atomic_set(&g->mc.hw_irq_stall_count, 0);
nvgpu_atomic_set(&g->mc.sw_irq_stall_last_handled, 0);
nvgpu_atomic_set(&g->mc.hw_irq_nonstall_count, 1);
nvgpu_atomic_set(&g->mc.sw_irq_nonstall_last_handled, 0);
nvgpu_atomic_set(&g->mc.sw_irq_nonstall_pending, 1);
nvgpu_wait_for_deferred_interrupts(g);
return UNIT_SUCCESS;