Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git, synced 2025-12-24 10:34:43 +03:00
gpu: nvgpu: ring doorbell after enabling TSG
In some use cases the client will disable and preempt a TSG and then re-enable it using the IOCTLs provided. If only one context is being re-enabled and there is no other job submission in parallel, the runlist fetcher will just sleep until the next doorbell is received. This causes the above mentioned test cases to stall after re-enabling the TSG until someone submits a new job and triggers a doorbell.

Fix this by explicitly triggering a doorbell from gv11b_fifo_enable_tsg() after we enable all channels in the TSG.

Bug 2205192

Change-Id: I08e70e3d0f7e4dc6471e63809e246430cc4200c1
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1772378
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
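For illustration, a minimal sketch of the disable/preempt/re-enable sequence described above, as a user-space client might issue it. The ioctl names (NVGPU_IOCTL_TSG_DISABLE, NVGPU_IOCTL_TSG_PREEMPT, NVGPU_IOCTL_TSG_ENABLE) and the header path are assumptions drawn from the nvgpu UAPI and may differ between kernel trees:

/*
 * Sketch only: the user-space sequence that used to stall. The ioctl
 * names and the header path are assumptions; consult the nvgpu UAPI
 * header in your tree for the exact definitions.
 */
#include <sys/ioctl.h>
#include <linux/nvgpu.h>        /* assumed install location of the nvgpu UAPI header */

static int pause_and_resume_tsg(int tsg_fd)
{
        int err;

        /* Stop scheduling the TSG and preempt any work already running. */
        err = ioctl(tsg_fd, NVGPU_IOCTL_TSG_DISABLE);
        if (err)
                return err;

        err = ioctl(tsg_fd, NVGPU_IOCTL_TSG_PREEMPT);
        if (err)
                return err;

        /*
         * Re-enable the TSG. With no other submissions in flight, nothing
         * rang a doorbell here before this change, so the runlist fetcher
         * kept sleeping; gv11b_fifo_enable_tsg() now rings one itself.
         */
        return ioctl(tsg_fd, NVGPU_IOCTL_TSG_ENABLE);
}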
committed by mobile promotions
parent 0297eed338
commit c8347c8369
@@ -808,13 +808,18 @@ int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
 {
         struct gk20a *g = tsg->g;
         struct channel_gk20a *ch;
+        struct channel_gk20a *last_ch = NULL;
 
         nvgpu_rwsem_down_read(&tsg->ch_list_lock);
         nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
                 g->ops.fifo.enable_channel(ch);
+                last_ch = ch;
         }
         nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
+        if (last_ch)
+                g->ops.fifo.ring_channel_doorbell(last_ch);
+
         return 0;
 }