Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-24 10:34:43 +03:00)
gpu: nvgpu: Rename runlist_id to id
Rename the runlist_id field in struct nvgpu_runlist to just id. The
runlist part is redundant given that this id is already in
'struct nvgpu_runlist'.

Change-Id: Ie2ea98f65d75e5e46430734bd7a7f6d6267c7577
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2470306
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Tejal Kudav <tkudav@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Commit: 5bf229dcd5
Parent: bd1b395b5c
Committed by: mobile promotions
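The rename is mechanical, but a standalone sketch may help show what it buys at call sites. The following is a minimal, compilable illustration, not the actual nvgpu code: the pared-down struct and the main() driver are hypothetical, and BIT32() is redefined locally as a stand-in for the macro the driver uses when building runlist masks.

/* Sketch of the rename; assumes a pared-down struct, not the real headers. */
#include <stdint.h>
#include <stdio.h>

#define BIT32(i) ((uint32_t)1U << (i))	/* local stand-in for nvgpu's BIT32() */

struct nvgpu_runlist {
	uint32_t id;	/* was runlist_id; "runlist" is already implied by the struct */
};

int main(void)
{
	struct nvgpu_runlist rl = { .id = 3U };
	uint32_t active_runlist_ids = 0U;

	/* Call sites now read runlist->id instead of the redundant
	 * runlist->runlist_id. */
	active_runlist_ids |= BIT32(rl.id);
	printf("active runlist mask: 0x%x\n", active_runlist_ids);
	return 0;
}

Note that the rename applies only where the id lives inside struct nvgpu_runlist itself; fields of other structs, such as dev->runlist_id in the engine-info loop below, keep their names.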
@@ -1902,7 +1902,7 @@ int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g)
 
 			channels_in_use = true;
 
-			active_runlist_ids |= BIT32(ch->runlist->runlist_id);
+			active_runlist_ids |= BIT32(ch->runlist->id);
 		}
 
 		nvgpu_channel_put(ch);
@@ -1939,7 +1939,7 @@ int nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g)
 			nvgpu_log_info(g, "resume channel %d", chid);
 			g->ops.channel.bind(ch);
 			channels_in_use = true;
-			active_runlist_ids |= BIT32(ch->runlist->runlist_id);
+			active_runlist_ids |= BIT32(ch->runlist->id);
 		}
 		nvgpu_channel_put(ch);
 	}
@@ -54,7 +54,7 @@ int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
 
 	nvgpu_mutex_acquire(&tsg->runlist->runlist_lock);
 
-	nvgpu_runlist_set_state(g, BIT32(tsg->runlist->runlist_id),
+	nvgpu_runlist_set_state(g, BIT32(tsg->runlist->id),
 		RUNLIST_DISABLED);
 #ifdef CONFIG_NVGPU_LS_PMU
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
@@ -77,7 +77,7 @@ int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
 		}
 	}
 #endif
-	nvgpu_runlist_set_state(g, BIT32(tsg->runlist->runlist_id),
+	nvgpu_runlist_set_state(g, BIT32(tsg->runlist->id),
 		RUNLIST_ENABLED);
 
 	nvgpu_mutex_release(&tsg->runlist->runlist_lock);
@@ -164,11 +164,11 @@ void nvgpu_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_bitmask)
 
 		runlist = &f->active_runlists[i];
 
-		if ((BIT32(runlist->runlist_id) & runlists_bitmask) == 0U) {
+		if ((BIT32(runlist->id) & runlists_bitmask) == 0U) {
 			continue;
 		}
 		/* issue runlist preempt */
-		g->ops.fifo.preempt_trigger(g, runlist->runlist_id,
+		g->ops.fifo.preempt_trigger(g, runlist->id,
 			ID_TYPE_RUNLIST);
 #ifdef CONFIG_NVGPU_RECOVERY
 		/*
@@ -481,7 +481,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 #endif
 
 	g->ops.runlist.hw_submit(
-		g, runlist->runlist_id, runlist->count, runlist->cur_buffer);
+		g, runlist->id, runlist->count, runlist->cur_buffer);
 
 	if (preempt_next) {
 		if (g->ops.runlist.reschedule_preempt_next_locked(ch,
@@ -490,9 +490,9 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 		}
 	}
 
-	if (g->ops.runlist.wait_pending(g, runlist->runlist_id) != 0) {
+	if (g->ops.runlist.wait_pending(g, runlist->id) != 0) {
 		nvgpu_err(g, "wait pending failed for runlist %u",
-			runlist->runlist_id);
+			runlist->id);
 	}
 #ifdef CONFIG_NVGPU_LS_PMU
 	if (mutex_ret == 0) {
@@ -529,7 +529,7 @@ static int nvgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 		PMU_MUTEX_ID_FIFO, &token);
 #endif
-	ret = nvgpu_runlist_update_locked(g, rl->runlist_id, ch, add,
+	ret = nvgpu_runlist_update_locked(g, rl->id, ch, add,
 		wait_for_finish);
 #ifdef CONFIG_NVGPU_LS_PMU
 	if (mutex_ret == 0) {
@@ -542,7 +542,7 @@ static int nvgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
 	nvgpu_mutex_release(&rl->runlist_lock);
 
 	if (ret == -ETIMEDOUT) {
-		nvgpu_rc_runlist_update(g, rl->runlist_id);
+		nvgpu_rc_runlist_update(g, rl->id);
 	}
 
 	return ret;
@@ -666,7 +666,7 @@ void nvgpu_runlist_cleanup_sw(struct gk20a *g)
 		runlist->active_tsgs = NULL;
 
 		nvgpu_mutex_destroy(&runlist->runlist_lock);
-		f->runlists[runlist->runlist_id] = NULL;
+		f->runlists[runlist->id] = NULL;
 	}
 
 	nvgpu_kfree(g, f->active_runlists);
@@ -693,20 +693,20 @@ void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f)
 		runlist = &f->active_runlists[i];
 
 		(void) g->ops.fifo.find_pbdma_for_runlist(g,
-						runlist->runlist_id,
+						runlist->id,
 						&runlist->pbdma_bitmask);
 		nvgpu_log(g, gpu_dbg_info, "runlist %d: pbdma bitmask 0x%x",
-			runlist->runlist_id, runlist->pbdma_bitmask);
+			runlist->id, runlist->pbdma_bitmask);
 
 		for (j = 0; j < f->num_engines; j++) {
 			dev = f->active_engines[j];
 
-			if (dev->runlist_id == runlist->runlist_id) {
+			if (dev->runlist_id == runlist->id) {
 				runlist->eng_bitmask |= BIT32(dev->engine_id);
 			}
 		}
 		nvgpu_log(g, gpu_dbg_info, "runlist %d: act eng bitmask 0x%x",
-			runlist->runlist_id, runlist->eng_bitmask);
+			runlist->id, runlist->eng_bitmask);
 	}
 
 	nvgpu_log_fn(g, "done");
@@ -740,7 +740,7 @@ static int nvgpu_init_active_runlist_mapping(struct gk20a *g)
 		rl_dbg(g, " SW runlist index to HW: %u -> %u", i, runlist_id);
 
 		runlist = &f->active_runlists[i];
-		runlist->runlist_id = runlist_id;
+		runlist->id = runlist_id;
 		f->runlists[runlist_id] = runlist;
 		i = nvgpu_safe_add_u32(i, 1U);
 
@@ -865,11 +865,11 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
 			runlist = &f->active_runlists[i];
 
 			if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
-				runlists_mask |= BIT32(runlist->runlist_id);
+				runlists_mask |= BIT32(runlist->id);
 			}
 
 			if ((runlist->pbdma_bitmask & pbdma_bitmask) != 0U) {
-				runlists_mask |= BIT32(runlist->runlist_id);
+				runlists_mask |= BIT32(runlist->id);
 			}
 		}
 	}
@@ -885,7 +885,7 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
 			/* Warning on Linux, real assert on QNX. */
 			nvgpu_assert(runlist != NULL);
 		} else {
-			runlists_mask |= BIT32(runlist->runlist_id);
+			runlists_mask |= BIT32(runlist->id);
 		}
 	} else {
 		if (bitmask_disabled) {
@@ -895,7 +895,7 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
 			for (i = 0U; i < f->num_runlists; i++) {
 				runlist = &f->active_runlists[i];
 
-				runlists_mask |= BIT32(runlist->runlist_id);
+				runlists_mask |= BIT32(runlist->id);
 			}
 		} else {
 			nvgpu_log(g, gpu_dbg_info, "id_type_unknown, engine "
@@ -122,8 +122,8 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
 		if (tsg->runlist != ch->runlist) {
 			nvgpu_err(tsg->g,
 				"runlist_id mismatch ch[%d] tsg[%d]",
-				ch->runlist->runlist_id,
-				tsg->runlist->runlist_id);
+				ch->runlist->id,
+				tsg->runlist->id);
 			return -EINVAL;
 		}
 	}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -82,7 +82,7 @@ int vgpu_channel_alloc_inst(struct gk20a *g, struct nvgpu_channel *ch)
 	msg.cmd = TEGRA_VGPU_CMD_CHANNEL_ALLOC_HWCTX;
 	msg.handle = vgpu_get_handle(g);
 	p->id = ch->chid;
-	p->runlist_id = ch->runlist->runlist_id;
+	p->runlist_id = ch->runlist->id;
 	p->pid = (u64)ch->pid;
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
@@ -173,7 +173,7 @@ static int vgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
 
 	nvgpu_mutex_acquire(&rl->runlist_lock);
 
-	ret = vgpu_runlist_update_locked(g, rl->runlist_id, ch, add,
+	ret = vgpu_runlist_update_locked(g, rl->id, ch, add,
 			wait_for_finish);
 
 	nvgpu_mutex_release(&rl->runlist_lock);
@@ -41,7 +41,7 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 		return;
 	}
 
-	nvgpu_runlist_set_state(g, BIT32(tsg->runlist->runlist_id),
+	nvgpu_runlist_set_state(g, BIT32(tsg->runlist->id),
 		RUNLIST_DISABLED);
 
 	/*
@@ -73,6 +73,6 @@ void gk20a_tsg_enable(struct nvgpu_tsg *tsg)
 	}
 	nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
-	nvgpu_runlist_set_state(g, BIT32(tsg->runlist->runlist_id),
+	nvgpu_runlist_set_state(g, BIT32(tsg->runlist->id),
 		RUNLIST_ENABLED);
 }
@@ -96,7 +96,7 @@ void gv11b_tsg_bind_channel_eng_method_buffers(struct nvgpu_tsg *tsg,
 		return;
 	}
 
-	if (tsg->runlist->runlist_id == nvgpu_engine_get_fast_ce_runlist_id(g)) {
+	if (tsg->runlist->id == nvgpu_engine_get_fast_ce_runlist_id(g)) {
 		gpu_va = tsg->eng_method_buffers[ASYNC_CE_RUNQUE].gpu_va;
 	} else {
 		gpu_va = tsg->eng_method_buffers[GR_RUNQUE].gpu_va;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -60,13 +60,13 @@ u32 tu104_usermode_doorbell_token(struct nvgpu_channel *ch)
 	u32 hw_chid = f->channel_base + ch->chid;
 
 	return ctrl_doorbell_vector_f(hw_chid) |
-		ctrl_doorbell_runlist_id_f(ch->runlist->runlist_id);
+		ctrl_doorbell_runlist_id_f(ch->runlist->id);
 }
 
 void tu104_usermode_ring_doorbell(struct nvgpu_channel *ch)
 {
 	nvgpu_log_info(ch->g, "channel ring door bell %d, runlist %d",
-		ch->chid, ch->runlist->runlist_id);
+		ch->chid, ch->runlist->id);
 
 	nvgpu_usermode_writel(ch->g, func_doorbell_r(),
 		ch->g->ops.usermode.doorbell_token(ch));
@@ -74,11 +74,11 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 	for (i = 0U; i < f->num_runlists; i++) {
 		runlist = &f->active_runlists[i];
 
-		if ((runlists_mask & BIT32(runlist->runlist_id)) == 0U) {
+		if ((runlists_mask & BIT32(runlist->id)) == 0U) {
 			continue;
 		}
 		nvgpu_log(g, gpu_dbg_info, "abort runlist id %d",
-			runlist->runlist_id);
+			runlist->id);
 
 		for_each_set_bit(tsgid, runlist->active_tsgs, f->num_channels) {
 			tsg = &g->fifo.tsg[tsgid];
@@ -118,10 +118,10 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 			 * the update to finish on hw.
 			 */
 			err = nvgpu_runlist_update_locked(g,
-					runlist->runlist_id, NULL, false, false);
+					runlist->id, NULL, false, false);
 			if (err != 0) {
 				nvgpu_err(g, "runlist id %d is not cleaned up",
-					runlist->runlist_id);
+					runlist->id);
 			}
 
 			nvgpu_tsg_abort(g, tsg, false);
@@ -322,14 +322,14 @@ void gv11b_fifo_recover(struct gk20a *g, u32 act_eng_bitmask,
 	for (i = 0U; i < f->num_runlists; i++) {
 		runlist = &f->active_runlists[i];
 
-		if (((runlists_mask & BIT32(runlist->runlist_id)) == 0U) ||
+		if (((runlists_mask & BIT32(runlist->id)) == 0U) ||
 			(runlist->reset_eng_bitmask == 0U)) {
 			continue;
 		}
 
 		bitmask = runlist->reset_eng_bitmask;
 		rec_dbg(g, " Engine bitmask for RL %u: 0x%lx",
-			runlist->runlist_id, bitmask);
+			runlist->id, bitmask);
 
 		for_each_set_bit(bit, &bitmask, f->max_engines) {
 
@@ -81,7 +81,7 @@ struct nvgpu_channel;
 
 struct nvgpu_runlist {
 	/** Runlist identifier. */
-	u32 runlist_id;
+	u32 id;
 	/** Bitmap of active channels in the runlist. One bit per chid. */
 	unsigned long *active_channels;
 	/** Bitmap of active TSGs in the runlist. One bit per tsgid. */