gpu: nvgpu: Use nvgpu_rwsem as TSG channel lock

Use the abstract nvgpu_rwsem as the TSG channel list lock instead of the
Linux-specific rw_semaphore.

JIRA NVGPU-259

Change-Id: I41a38b29d4651838b1962d69f102af1384e12cb6
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1579935
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Terje Bergstrom <tbergstrom@nvidia.com>
Date: 2017-10-16 14:58:17 -07:00
Committed-by: mobile promotions
Parent: 8f55976d49
Commit: e039dcbc9d
6 changed files with 36 additions and 35 deletions
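
The conversion is mechanical: every down_read()/up_read()/down_write()/up_write() call on tsg->ch_list_lock becomes the corresponding nvgpu_rwsem_* call, init_rwsem() becomes nvgpu_rwsem_init(), and the struct member changes from struct rw_semaphore to struct nvgpu_rwsem. A minimal sketch of what the Linux backing of this abstraction could look like is below; only the nvgpu_rwsem_* names and the struct name come from the diff, while the wrapper layout and the field name "rwsem" are illustrative assumptions:

/*
 * Hypothetical sketch of a Linux backing for the nvgpu_rwsem API; the
 * real nvgpu header may differ. The function names match the calls used
 * in this change; the embedded field name is assumed.
 */
#include <linux/rwsem.h>

struct nvgpu_rwsem {
	struct rw_semaphore rwsem;	/* assumed: Linux rw_semaphore underneath */
};

static inline void nvgpu_rwsem_init(struct nvgpu_rwsem *rwsem)
{
	init_rwsem(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_down_read(struct nvgpu_rwsem *rwsem)
{
	down_read(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_up_read(struct nvgpu_rwsem *rwsem)
{
	up_read(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_down_write(struct nvgpu_rwsem *rwsem)
{
	down_write(&rwsem->rwsem);
}

static inline void nvgpu_rwsem_up_write(struct nvgpu_rwsem *rwsem)
{
	up_write(&rwsem->rwsem);
}

Keeping such a wrapper inline would make the abstraction cost nothing on Linux, while non-Linux builds of nvgpu can supply their own backing for the same API.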


@@ -1356,14 +1356,14 @@ bool gk20a_fifo_error_tsg(struct gk20a *g,
 	struct channel_gk20a *ch = NULL;
 	bool verbose = false;
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			verbose |= gk20a_fifo_error_ch(g, ch);
 			gk20a_channel_put(ch);
 		}
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	return verbose;
@@ -1386,14 +1386,14 @@ void gk20a_fifo_set_ctx_mmu_error_tsg(struct gk20a *g,
 	nvgpu_err(g,
 		"TSG %d generated a mmu fault", tsg->tsgid);
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			gk20a_fifo_set_ctx_mmu_error_ch(g, ch);
 			gk20a_channel_put(ch);
 		}
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }
@@ -1409,7 +1409,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
 	if (preempt)
 		g->ops.fifo.preempt_tsg(g, tsgid);
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 		if (gk20a_channel_get(ch)) {
 			ch->has_timedout = true;
@@ -1417,7 +1417,7 @@ void gk20a_fifo_abort_tsg(struct gk20a *g, u32 tsgid, bool preempt)
 			gk20a_channel_put(ch);
 		}
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 }
 
 int gk20a_fifo_deferred_reset(struct gk20a *g, struct channel_gk20a *ch)
@@ -1906,7 +1906,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
 		tsg = &g->fifo.tsg[ch->tsgid];
 
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
 			if (gk20a_channel_get(ch_tsg)) {
@@ -1915,7 +1915,7 @@ int gk20a_fifo_force_reset_ch(struct channel_gk20a *ch,
 			}
 		}
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 		gk20a_fifo_recover_tsg(g, ch->tsgid, verbose);
 	} else {
 		gk20a_set_error_notifier(ch, err_code);
@@ -1971,9 +1971,9 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch)
 		goto fail_enable_tsg;
 
 	/* Remove channel from TSG and re-enable rest of the channels */
-	down_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 	nvgpu_list_del(&ch->ch_entry);
-	up_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
 	g->ops.fifo.enable_tsg(tsg);
@@ -2084,7 +2084,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	*verbose = false;
 	*ms = GRFIFO_TIMEOUT_CHECK_PERIOD_US / 1000;
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 
 	/* check if there was some progress on any of the TSG channels.
 	 * fifo recovery is needed if at least one channel reached the
@@ -2140,7 +2140,7 @@ bool gk20a_fifo_check_tsg_ctxsw_timeout(struct tsg_gk20a *tsg,
 	 * of them has reached the timeout, there is nothing more to do:
 	 * timeout_accumulated_ms has been updated for all of them.
 	 */
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	return recover;
 }
@@ -2470,7 +2470,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g,
 			struct tsg_gk20a *tsg = &f->tsg[id];
 			struct channel_gk20a *ch = NULL;
 
-			down_read(&tsg->ch_list_lock);
+			nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 			list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 				if (gk20a_channel_get(ch)) {
 					gk20a_set_error_notifier(ch,
@@ -2478,7 +2478,7 @@ static u32 gk20a_fifo_handle_pbdma_intr(struct gk20a *g,
 					gk20a_channel_put(ch);
 				}
 			}
-			up_read(&tsg->ch_list_lock);
+			nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 			gk20a_fifo_recover_tsg(g, id, true);
 		}
 	}
@@ -2599,7 +2599,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 		nvgpu_err(g,
 			"preempt TSG %d timeout", id);
 
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 			if (!gk20a_channel_get(ch))
 				continue;
@@ -2607,7 +2607,7 @@ void __locked_fifo_preempt_timeout_rc(struct gk20a *g, u32 id,
 				NVGPU_CHANNEL_FIFO_ERROR_IDLE_TIMEOUT);
 			gk20a_channel_put(ch);
 		}
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 		gk20a_fifo_recover_tsg(g, id, true);
 	} else {
 		struct channel_gk20a *ch = &g->fifo.channel[id];
@@ -3095,7 +3095,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 		count++;
 		(*entries_left)--;
 
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		/* add runnable channels bound to this TSG */
 		list_for_each_entry(ch, &tsg->ch_list, ch_entry) {
 			if (!test_bit(ch->chid,
@@ -3103,7 +3103,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 				continue;
 
 			if (!(*entries_left)) {
-				up_read(&tsg->ch_list_lock);
+				nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 				return NULL;
 			}
@@ -3117,7 +3117,7 @@ static u32 *gk20a_runlist_construct_locked(struct fifo_gk20a *f,
 			runlist_entry += runlist_entry_words;
 			(*entries_left)--;
 		}
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	}
 
 	/* append entries from higher level if this level is empty */


@@ -5091,7 +5091,7 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g,
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
 		tsg = &g->fifo.tsg[ch->tsgid];
 
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
 			if (gk20a_channel_get(ch_tsg)) {
 				gk20a_set_error_notifier(ch_tsg,
@@ -5099,7 +5099,7 @@ static void gk20a_gr_set_error_notifier(struct gk20a *g,
 				gk20a_channel_put(ch_tsg);
 			}
 		}
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	} else {
 		gk20a_set_error_notifier(ch, error_notifier);
 	}


@@ -44,7 +44,7 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 	 * we first need to enable all channels with NEXT and CTX_RELOAD set,
 	 * and then rest of the channels should be enabled
 	 */
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
 		is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
@@ -62,7 +62,7 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
 		g->ops.fifo.enable_channel(ch);
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	gk20a_fifo_enable_tsg_sched(g, tsg);
@@ -74,11 +74,11 @@ int gk20a_disable_tsg(struct tsg_gk20a *tsg)
 	struct gk20a *g = tsg->g;
 	struct channel_gk20a *ch;
 
-	down_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 		g->ops.fifo.disable_channel(ch);
 	}
-	up_read(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
 	return 0;
 }
@@ -130,9 +130,9 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
 		return -EINVAL;
 	}
 
-	down_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 	nvgpu_list_add_tail(&ch->ch_entry, &tsg->ch_list);
-	up_write(&tsg->ch_list_lock);
+	nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 
 	nvgpu_ref_get(&tsg->refcount);
@@ -158,9 +158,9 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
 		/* If channel unbind fails, channel is still part of runlist */
 		channel_gk20a_update_runlist(ch, false);
 
-		down_write(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_write(&tsg->ch_list_lock);
 		nvgpu_list_del(&ch->ch_entry);
-		up_write(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_write(&tsg->ch_list_lock);
 	}
 
 	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
@@ -186,7 +186,7 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
 	tsg->tsgid = tsgid;
 	nvgpu_init_list_node(&tsg->ch_list);
-	init_rwsem(&tsg->ch_list_lock);
+	nvgpu_rwsem_init(&tsg->ch_list_lock);
 
 	nvgpu_init_list_node(&tsg->event_id_list);
 	err = nvgpu_mutex_init(&tsg->event_id_list_lock);


@@ -24,6 +24,7 @@
 #include <nvgpu/lock.h>
 #include <nvgpu/kref.h>
+#include <nvgpu/rwsem.h>
 
 #define NVGPU_INVALID_TSG_ID (-1)
@@ -46,7 +47,7 @@ struct tsg_gk20a {
 	struct nvgpu_list_node ch_list;
 	int num_active_channels;
-	struct rw_semaphore ch_list_lock;
+	struct nvgpu_rwsem ch_list_lock;
 
 	unsigned int timeslice_us;
 	unsigned int timeslice_timeout;


@@ -210,13 +210,13 @@ void gm20b_fifo_tsg_verify_status_ctx_reload(struct channel_gk20a *ch)
 	/* If CTX_RELOAD is set on a channel, move it to some other channel */
 	if (gk20a_fifo_channel_status_is_ctx_reload(ch->g, ch->chid)) {
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		nvgpu_list_for_each_entry(temp_ch, &tsg->ch_list, channel_gk20a, ch_entry) {
 			if (temp_ch->chid != ch->chid) {
 				gm20b_fifo_set_ctx_reload(temp_ch);
 				break;
 			}
 		}
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	}
 }


@@ -712,7 +712,7 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
 	if (gk20a_is_channel_marked_as_tsg(ch)) {
 		tsg = &g->fifo.tsg[ch->tsgid];
 
-		down_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_down_read(&tsg->ch_list_lock);
 		list_for_each_entry(ch_tsg, &tsg->ch_list, ch_entry) {
 			if (gk20a_channel_get(ch_tsg)) {
@@ -722,7 +722,7 @@ int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
 			}
 		}
-		up_read(&tsg->ch_list_lock);
+		nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 	} else {
 		gk20a_set_error_notifier(ch, err_code);
 		ch->has_timedout = true;