gpu: nvgpu: do not use raw spinlock for ch->timeout.lock

With a PREEMPT_RT kernel, regular spinlocks are mapped onto sleeping
spinlocks (rt_mutex based), while raw spinlocks retain their usual
non-sleeping behaviour.

A "scheduling while atomic" bug can occur in gk20a_channel_timeout_start(),
which acquires the ch->timeout.lock raw spinlock and then calls
functions that acquire the ch->ch_timedout_lock regular spinlock,
a lock that may sleep on PREEMPT_RT.
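
For illustration, a minimal sketch of the bad nesting (hypothetical
helper, not the actual nvgpu call chain):

	/*
	 * On PREEMPT_RT the regular spinlock below is an rt_mutex and
	 * may sleep, but the raw spinlock has already disabled
	 * preemption, so the kernel reports "scheduling while atomic".
	 */
	static void sketch_bad_nesting(struct channel_gk20a *ch)
	{
		nvgpu_raw_spinlock_acquire(&ch->timeout.lock);	/* preemption off */
		nvgpu_spinlock_acquire(&ch->ch_timedout_lock);	/* may sleep on RT */
		nvgpu_spinlock_release(&ch->ch_timedout_lock);
		nvgpu_raw_spinlock_release(&ch->timeout.lock);
	}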

Bug 200484795

Change-Id: Iacc63195d8ee6a2d571c998da1b4b5d396f49439
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2004100
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>

@@ -1526,14 +1526,14 @@ static void gk20a_channel_timeout_start(struct channel_gk20a *ch)
 		return;
 	}
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 	if (ch->timeout.running) {
-		nvgpu_raw_spinlock_release(&ch->timeout.lock);
+		nvgpu_spinlock_release(&ch->timeout.lock);
 		return;
 	}
 	__gk20a_channel_timeout_start(ch);
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 }
 /**
@@ -1551,10 +1551,10 @@ static bool gk20a_channel_timeout_stop(struct channel_gk20a *ch)
 {
 	bool was_running;
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 	was_running = ch->timeout.running;
 	ch->timeout.running = false;
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 	return was_running;
 }
@@ -1569,9 +1569,9 @@ static bool gk20a_channel_timeout_stop(struct channel_gk20a *ch)
  */
 static void gk20a_channel_timeout_continue(struct channel_gk20a *ch)
 {
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 	ch->timeout.running = true;
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 }
 /**
@@ -1586,11 +1586,11 @@ static void gk20a_channel_timeout_continue(struct channel_gk20a *ch)
  */
 static void gk20a_channel_timeout_rewind(struct channel_gk20a *ch)
 {
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 	if (ch->timeout.running) {
 		__gk20a_channel_timeout_start(ch);
 	}
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 }
 /**
@@ -1645,10 +1645,10 @@ static void gk20a_channel_timeout_handler(struct channel_gk20a *ch)
 	}
 	/* Get status but keep timer running */
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 	gp_get = ch->timeout.gp_get;
 	pb_get = ch->timeout.pb_get;
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 	new_gp_get = g->ops.fifo.userd_gp_get(ch->g, ch);
 	new_pb_get = g->ops.fifo.userd_pb_get(ch->g, ch);
@@ -1688,9 +1688,9 @@ static void gk20a_channel_timeout_check(struct channel_gk20a *ch)
 {
 	bool running;
-	nvgpu_raw_spinlock_acquire(&ch->timeout.lock);
+	nvgpu_spinlock_acquire(&ch->timeout.lock);
 	running = ch->timeout.running;
-	nvgpu_raw_spinlock_release(&ch->timeout.lock);
+	nvgpu_spinlock_release(&ch->timeout.lock);
 	if (running) {
 		gk20a_channel_timeout_handler(ch);
@@ -2343,7 +2343,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	nvgpu_spinlock_init(&c->ref_actions_lock);
 #endif
 	nvgpu_spinlock_init(&c->joblist.dynamic.lock);
-	nvgpu_raw_spinlock_init(&c->timeout.lock);
+	nvgpu_spinlock_init(&c->timeout.lock);
 	nvgpu_init_list_node(&c->joblist.dynamic.jobs);
 	nvgpu_init_list_node(&c->dbg_s_list);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -168,7 +168,7 @@ struct channel_gk20a_joblist {
 struct channel_gk20a_timeout {
 	/* lock protects the running timer state */
-	struct nvgpu_raw_spinlock lock;
+	struct nvgpu_spinlock lock;
 	struct nvgpu_timeout timer;
 	bool running;
 	u32 gp_get;