gpu: nvgpu: Use runlist struct in construction APIs

Use a struct nvgpu_runlist pointer for the runlist update and
construction APIs.

This gets rid of the runlist ID being passed into the runlist
code for most of the normal APIs. Some recovery and suspect APIs
still use runlist ID masks since they may work with multiple
runlists at a time. These will be updated in the future.
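
Roughly, the calling convention changes as in the hedged sketch below. The
variable names and the surrounding lookup are illustrative only, not lifted
verbatim from this change; the f->runlists[] lookup mirrors what the old
helpers did internally with the runlist ID:

    /* Illustrative sketch: resolve the runlist object once, then pass the
     * pointer through the update path instead of a raw runlist ID. */
    struct nvgpu_fifo *f = &g->fifo;
    struct nvgpu_runlist *rl = f->runlists[runlist_id];

    /* Old: err = nvgpu_runlist_update_locked(g, runlist_id, ch, add,
     *                                        wait_for_finish); */
    err = nvgpu_runlist_update_locked(g, rl, ch, add, wait_for_finish);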

Jira NVGPU-6425

Change-Id: Ib8d7a6aad0201af62267099cd993d130504478e8
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2470307
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Tejal Kudav <tkudav@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit c55f7d624c (parent 9ff5b779c9)
Author:    Alex Waterman
Date:      2021-01-01 22:07:36 -06:00
Committer: mobile promotions
3 changed files, 34 insertions(+), 46 deletions(-)

@@ -303,8 +303,6 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
 {
 	u32 *runlist_entry_base = runlist->mem[buf_id].cpu_va;
 
-	nvgpu_log_fn(f->g, " ");
-
 	/*
 	 * The entry pointer and capacity counter that live on the stack here
 	 * keep track of the current position and the remaining space when tsg
@@ -319,14 +317,12 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
 	}
 }
 
-static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
+static bool nvgpu_runlist_modify_active_locked(struct gk20a *g,
+		struct nvgpu_runlist *runlist,
 		struct nvgpu_channel *ch, bool add)
 {
-	struct nvgpu_fifo *f = &g->fifo;
-	struct nvgpu_runlist *runlist = NULL;
 	struct nvgpu_tsg *tsg = NULL;
 
-	runlist = f->runlists[runlist_id];
 	tsg = nvgpu_tsg_from_ch(ch);
 
 	if (tsg == NULL) {
@@ -368,21 +364,22 @@ static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
 	return true;
 }
 
-static int gk20a_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
+static int nvgpu_runlist_reconstruct_locked(struct gk20a *g,
+		struct nvgpu_runlist *runlist,
 		u32 buf_id, bool add_entries)
 {
+	u32 num_entries;
 	struct nvgpu_fifo *f = &g->fifo;
-	struct nvgpu_runlist *runlist = NULL;
 
-	runlist = f->runlists[runlist_id];
+	rl_dbg(g, "[%u] switch to new buffer 0x%16llx",
+		runlist->id, (u64)nvgpu_mem_get_addr(g, &runlist->mem[buf_id]));
 
-	nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx",
-		runlist_id, (u64)nvgpu_mem_get_addr(g, &runlist->mem[buf_id]));
+	if (!add_entries) {
+		runlist->count = 0;
+		return 0;
+	}
 
-	if (add_entries) {
-		u32 num_entries = nvgpu_runlist_construct_locked(f,
-				runlist,
-				buf_id,
-				f->num_runlist_entries);
-		if (num_entries == RUNLIST_APPEND_FAILURE) {
-			return -E2BIG;
+	num_entries = nvgpu_runlist_construct_locked(f, runlist, buf_id,
+			f->num_runlist_entries);
+	if (num_entries == RUNLIST_APPEND_FAILURE) {
+		return -E2BIG;
@@ -395,26 +392,20 @@ NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 15_6), "Bug
 NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 10_3))
 NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 14_4))
 NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
-	} else {
-		runlist->count = 0;
-	}
 
 	return 0;
 }
 
-int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_update_locked(struct gk20a *g, struct nvgpu_runlist *rl,
 			struct nvgpu_channel *ch, bool add,
 			bool wait_for_finish)
 {
 	int ret = 0;
-	struct nvgpu_fifo *f = &g->fifo;
-	struct nvgpu_runlist *runlist = NULL;
 	u32 buf_id;
 	bool add_entries;
 
 	if (ch != NULL) {
-		bool update = gk20a_runlist_modify_active_locked(g, runlist_id,
-				ch, add);
+		bool update = nvgpu_runlist_modify_active_locked(g, rl, ch, add);
 		if (!update) {
 			/* no change in runlist contents */
 			return 0;
@@ -426,23 +417,21 @@ int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
 		add_entries = add;
 	}
 
-	runlist = f->runlists[runlist_id];
-
 	/* double buffering, swap to next */
-	buf_id = (runlist->cur_buffer == 0U) ? 1U : 0U;
+	buf_id = (rl->cur_buffer == 0U) ? 1U : 0U;
 
-	ret = gk20a_runlist_reconstruct_locked(g, runlist_id, buf_id,
-			add_entries);
+	ret = nvgpu_runlist_reconstruct_locked(g, rl, buf_id, add_entries);
 	if (ret != 0) {
 		return ret;
 	}
 
-	g->ops.runlist.hw_submit(g, runlist_id, runlist->count, buf_id);
+	g->ops.runlist.hw_submit(g, rl->id, rl->count, buf_id);
 
 	if (wait_for_finish) {
-		ret = g->ops.runlist.wait_pending(g, runlist_id);
+		ret = g->ops.runlist.wait_pending(g, rl->id);
 
 		if (ret == -ETIMEDOUT) {
-			nvgpu_err(g, "runlist %d update timeout", runlist_id);
+			nvgpu_err(g, "runlist %d update timeout", rl->id);
 			/* trigger runlist update timeout recovery */
 			return ret;
@@ -453,7 +442,7 @@ int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
 		}
 	}
 
-	runlist->cur_buffer = buf_id;
+	rl->cur_buffer = buf_id;
 
 	return ret;
 }
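
For context on the cur_buffer logic above: the runlist is double buffered so
hardware can keep consuming one buffer while the driver rebuilds the other.
A minimal standalone model of the swap (types simplified here for
illustration; this is not the driver's actual structure):

    /* Standalone model of the two-buffer swap. */
    struct runlist_model {
            unsigned int cur_buffer; /* buffer currently submitted to HW */
    };

    static unsigned int next_buf_id(const struct runlist_model *rl)
    {
            /* Rebuild into the buffer the hardware is not reading. */
            return (rl->cur_buffer == 0U) ? 1U : 0U;
    }

Note that cur_buffer is only advanced after the rebuild and hw_submit
succeed, so a failed update leaves the previously submitted buffer live.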
@@ -529,8 +518,7 @@ static int nvgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 		PMU_MUTEX_ID_FIFO, &token);
 #endif
-	ret = nvgpu_runlist_update_locked(g, rl->id, ch, add,
-			wait_for_finish);
+	ret = nvgpu_runlist_update_locked(g, rl, ch, add, wait_for_finish);
 #ifdef CONFIG_NVGPU_LS_PMU
 	if (mutex_ret == 0) {
 		if (nvgpu_pmu_lock_release(g, g->pmu,

@@ -118,7 +118,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 	 * the update to finish on hw.
 	 */
 	err = nvgpu_runlist_update_locked(g,
-			runlist->id, NULL, false, false);
+			runlist, NULL, false, false);
 	if (err != 0) {
 		nvgpu_err(g, "runlist id %d is not cleaned up",
 			runlist->id);

@@ -135,8 +135,8 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
  * @brief Add/remove channel to/from runlist (locked)
  *
  * @param g [in] The GPU driver struct owning this runlist.
- * @param runlist_id [in] Runlist identifier.
- * @param ch [in] Channel to be added/removed or NULL.
+ * @param runlist [in] Runlist object to be modified.
+ * @param ch [in] Channel to be added/removed or NULL to update all.
  * @param add [in] True to add a channel, false to remove it.
  * @param wait_for_finish [in] True to wait for runlist update completion.
  *
@@ -151,7 +151,7 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
  * @retval -E2BIG in case there are not enough entries in runlist buffer to
  * describe all active channels and TSGs.
  */
-int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_update_locked(struct gk20a *g, struct nvgpu_runlist *rl,
 	struct nvgpu_channel *ch, bool add, bool wait_for_finish);
 
 #ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
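
A hedged usage sketch of the updated API. The _locked suffix means the caller
already holds the runlist lock; the wrapper function and the lock field name
(runlist_lock) below are assumptions for illustration, not part of this
change:

    /* Assumed-for-illustration wrapper; only the
     * nvgpu_runlist_update_locked() signature comes from this change. */
    static int remove_channel(struct gk20a *g, struct nvgpu_runlist *rl,
                    struct nvgpu_channel *ch)
    {
            int err;

            nvgpu_mutex_acquire(&rl->runlist_lock); /* assumed field name */
            /* add=false removes ch; wait_for_finish=true blocks until the
             * hardware acknowledges the new runlist. */
            err = nvgpu_runlist_update_locked(g, rl, ch, false, true);
            nvgpu_mutex_release(&rl->runlist_lock);

            return err;
    }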