mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: Use runlist struct in construction APIs
Use a struct nvgpu_runlist pointer for the runlist update and
construction APIs. This gets rid of the runlist ID being passed into
the runlist code for most of the normal APIs. Some recovery and suspend
APIs still use runlist ID masks, since they may work with multiple
runlists at a time; these will be updated in the future.

Jira NVGPU-6425

Change-Id: Ib8d7a6aad0201af62267099cd993d130504478e8
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2470307
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Tejal Kudav <tkudav@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 9ff5b779c9
commit c55f7d624c
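The mechanical shape of the change: callers now resolve the struct nvgpu_runlist pointer once and pass it down, instead of handing the runlist core an ID to look up in f->runlists[] on every call. A minimal before/after sketch, using only names that appear in the diff below:

	/* before: the runlist core resolved the ID internally on each call */
	ret = nvgpu_runlist_update_locked(g, runlist_id, ch, add, wait_for_finish);

	/* after: the caller resolves the pointer; rl->id remains available
	 * where the hardware interface still needs the raw ID */
	struct nvgpu_runlist *rl = g->fifo.runlists[runlist_id];
	ret = nvgpu_runlist_update_locked(g, rl, ch, add, wait_for_finish);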
@@ -303,8 +303,6 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
 {
 	u32 *runlist_entry_base = runlist->mem[buf_id].cpu_va;
 
-	nvgpu_log_fn(f->g, " ");
-
 	/*
 	 * The entry pointer and capacity counter that live on the stack here
	 * keep track of the current position and the remaining space when tsg
@@ -319,14 +317,12 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
 	}
 }
 
-static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
+static bool nvgpu_runlist_modify_active_locked(struct gk20a *g,
+		struct nvgpu_runlist *runlist,
 		struct nvgpu_channel *ch, bool add)
 {
-	struct nvgpu_fifo *f = &g->fifo;
-	struct nvgpu_runlist *runlist = NULL;
 	struct nvgpu_tsg *tsg = NULL;
 
-	runlist = f->runlists[runlist_id];
 	tsg = nvgpu_tsg_from_ch(ch);
 
 	if (tsg == NULL) {
@@ -368,21 +364,22 @@ static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
 	return true;
 }
 
-static int gk20a_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
+static int nvgpu_runlist_reconstruct_locked(struct gk20a *g,
+		struct nvgpu_runlist *runlist,
 		u32 buf_id, bool add_entries)
 {
+	u32 num_entries;
 	struct nvgpu_fifo *f = &g->fifo;
-	struct nvgpu_runlist *runlist = NULL;
 
-	runlist = f->runlists[runlist_id];
-
-	nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx",
-		runlist_id, (u64)nvgpu_mem_get_addr(g, &runlist->mem[buf_id]));
+	rl_dbg(g, "[%u] switch to new buffer 0x%16llx",
+		runlist->id, (u64)nvgpu_mem_get_addr(g, &runlist->mem[buf_id]));
 
-	if (add_entries) {
-		u32 num_entries = nvgpu_runlist_construct_locked(f,
-							runlist,
-							buf_id,
+	if (!add_entries) {
+		runlist->count = 0;
+		return 0;
+	}
+
+	num_entries = nvgpu_runlist_construct_locked(f, runlist, buf_id,
 			f->num_runlist_entries);
 	if (num_entries == RUNLIST_APPEND_FAILURE) {
 		return -E2BIG;
@@ -395,26 +392,20 @@ NVGPU_COV_WHITELIST_BLOCK_BEGIN(false_positive, 1, NVGPU_MISRA(Rule, 15_6), "Bug
 NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 10_3))
 NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 14_4))
 NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
-	} else {
-		runlist->count = 0;
-	}
 
 	return 0;
 }
 
-int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_update_locked(struct gk20a *g, struct nvgpu_runlist *rl,
 		struct nvgpu_channel *ch, bool add,
 		bool wait_for_finish)
 {
 	int ret = 0;
-	struct nvgpu_fifo *f = &g->fifo;
-	struct nvgpu_runlist *runlist = NULL;
 	u32 buf_id;
 	bool add_entries;
 
 	if (ch != NULL) {
-		bool update = gk20a_runlist_modify_active_locked(g, runlist_id,
-				ch, add);
+		bool update = nvgpu_runlist_modify_active_locked(g, rl, ch, add);
 		if (!update) {
 			/* no change in runlist contents */
 			return 0;
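Taken together, the two hunks above flatten nvgpu_runlist_reconstruct_locked(): the old if (add_entries) { ... } else { ... } nesting becomes an early return, which is also why the MISRA whitelist block no longer wraps an else branch. Roughly, the resulting function reads as follows; this is a paraphrase assembled from the hunks, and the final runlist->count assignment is an assumption since the hunk cuts off before the function tail:

	static int nvgpu_runlist_reconstruct_locked(struct gk20a *g,
			struct nvgpu_runlist *runlist,
			u32 buf_id, bool add_entries)
	{
		u32 num_entries;
		struct nvgpu_fifo *f = &g->fifo;

		rl_dbg(g, "[%u] switch to new buffer 0x%16llx",
			runlist->id, (u64)nvgpu_mem_get_addr(g, &runlist->mem[buf_id]));

		if (!add_entries) {
			/* emptying the runlist needs no construction pass */
			runlist->count = 0;
			return 0;
		}

		num_entries = nvgpu_runlist_construct_locked(f, runlist, buf_id,
				f->num_runlist_entries);
		if (num_entries == RUNLIST_APPEND_FAILURE) {
			/* buffer too small to describe all active entries */
			return -E2BIG;
		}

		runlist->count = num_entries;	/* assumed: tail elided by the hunk */
		return 0;
	}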
@@ -426,23 +417,21 @@ int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
 		add_entries = add;
 	}
 
-	runlist = f->runlists[runlist_id];
 	/* double buffering, swap to next */
-	buf_id = (runlist->cur_buffer == 0U) ? 1U : 0U;
+	buf_id = (rl->cur_buffer == 0U) ? 1U : 0U;
 
-	ret = gk20a_runlist_reconstruct_locked(g, runlist_id, buf_id,
-			add_entries);
+	ret = nvgpu_runlist_reconstruct_locked(g, rl, buf_id, add_entries);
 	if (ret != 0) {
 		return ret;
 	}
 
-	g->ops.runlist.hw_submit(g, runlist_id, runlist->count, buf_id);
+	g->ops.runlist.hw_submit(g, rl->id, rl->count, buf_id);
 
 	if (wait_for_finish) {
-		ret = g->ops.runlist.wait_pending(g, runlist_id);
+		ret = g->ops.runlist.wait_pending(g, rl->id);
 
 		if (ret == -ETIMEDOUT) {
-			nvgpu_err(g, "runlist %d update timeout", runlist_id);
+			nvgpu_err(g, "runlist %d update timeout", rl->id);
 			/* trigger runlist update timeout recovery */
 			return ret;
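The buf_id computation above is the double-buffering core of the update path: each runlist keeps two buffers in runlist->mem[], the hardware consumes cur_buffer, and the new list is constructed in the other buffer before hw_submit() switches over. cur_buffer only advances after submission (and the optional wait) succeeds, as the next hunk shows. A toy standalone model of that invariant, assuming exactly two buffers as the 0U/1U ternary implies:

	#include <stdint.h>

	struct toy_runlist {
		uint32_t cur_buffer;	/* buffer the hardware currently runs from */
	};

	/* build into the inactive buffer; the caller flips cur_buffer only
	 * after the hardware has accepted the newly constructed list */
	static uint32_t toy_next_buffer(const struct toy_runlist *rl)
	{
		return (rl->cur_buffer == 0U) ? 1U : 0U;
	}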
@@ -453,7 +442,7 @@ int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
 		}
 	}
 
-	runlist->cur_buffer = buf_id;
+	rl->cur_buffer = buf_id;
 
 	return ret;
 }
@@ -529,8 +518,7 @@ static int nvgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
 	mutex_ret = nvgpu_pmu_lock_acquire(g, g->pmu,
 			PMU_MUTEX_ID_FIFO, &token);
 #endif
-	ret = nvgpu_runlist_update_locked(g, rl->id, ch, add,
-			wait_for_finish);
+	ret = nvgpu_runlist_update_locked(g, rl, ch, add, wait_for_finish);
 #ifdef CONFIG_NVGPU_LS_PMU
 	if (mutex_ret == 0) {
 		if (nvgpu_pmu_lock_release(g, g->pmu,
@@ -118,7 +118,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 	 * the update to finish on hw.
 	 */
 	err = nvgpu_runlist_update_locked(g,
-			runlist->id, NULL, false, false);
+			runlist, NULL, false, false);
 	if (err != 0) {
 		nvgpu_err(g, "runlist id %d is not cleaned up",
 			runlist->id);
@@ -135,8 +135,8 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
  * @brief Add/remove channel to/from runlist (locked)
  *
  * @param g [in] The GPU driver struct owning this runlist.
- * @param runlist_id [in] Runlist identifier.
- * @param ch [in] Channel to be added/removed or NULL.
+ * @param runlist [in] Runlist object to be modified.
+ * @param ch [in] Channel to be added/removed or NULL to update all.
  * @param add [in] True to add a channel, false to remove it.
  * @param wait_for_finish [in] True to wait for runlist update completion.
  *
@@ -151,7 +151,7 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
  * @retval -E2BIG in case there are not enough entries in runlist buffer to
  * describe all active channels and TSGs.
  */
-int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_update_locked(struct gk20a *g, struct nvgpu_runlist *rl,
 		struct nvgpu_channel *ch, bool add, bool wait_for_finish);
 
 #ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
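For the updated contract, the gv11b abort hunk above is already a usage example of the ch == NULL form, where the whole runlist is rebuilt from its active TSGs (or emptied, when add is false). A hypothetical caller-side sketch with the new signature, assuming the caller holds the runlist lock as the _locked suffix requires and that runlist_id indexes a valid entry:

	/* remove all entries from one runlist and wait for the switch-over */
	struct nvgpu_runlist *rl = g->fifo.runlists[runlist_id];
	int err = nvgpu_runlist_update_locked(g, rl, NULL, false, true);

	if (err == -ETIMEDOUT) {
		/* hw did not take the new list; recovery is triggered per the
		 * timeout comment in the diff above */
	} else if (err == -E2BIG) {
		/* runlist buffer cannot describe all active channels/TSGs */
	}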