gpu: nvgpu: remove nvgpu_next files

Remove all nvgpu_next files and move the code into corresponding
nvgpu files.

Merge nvgpu-next-*.yaml files into the corresponding nvgpu-*.yaml files.

Jira NVGPU-4771

Change-Id: I595311be3c7bbb4f6314811e68712ff01763801e
Signed-off-by: Antony Clince Alex <aalex@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2547557
Reviewed-by: svc_kernel_abi <svc_kernel_abi@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Antony Clince Alex
2021-06-19 10:17:51 +00:00
committed by mobile promotions
parent c7d43f5292
commit f9cac0c64d
126 changed files with 2351 additions and 4554 deletions

View File

@@ -26,6 +26,7 @@
#include <nvgpu/engines.h>
#include <nvgpu/device.h>
#include <nvgpu/runlist.h>
#include <nvgpu/pbdma.h>
#include <nvgpu/ptimer.h>
#include <nvgpu/bug.h>
#include <nvgpu/dma.h>
@@ -912,3 +913,92 @@ void nvgpu_runlist_unlock_runlists(struct gk20a *g, u32 runlists_mask)
}
}
}
#if defined(CONFIG_NVGPU_NON_FUSA)
/*
 * Record the per-runlist register bases and PBDMA info taken from one
 * engine device, and remember the device in the runlist's engine table.
 */
static void nvgpu_runlist_init_engine_info(struct gk20a *g,
		struct nvgpu_runlist *runlist,
		const struct nvgpu_device *dev)
{
	u32 pbdma_idx;

	/*
	 * runlist_pri_base, chram_bar0_offset and pbdma_info
	 * will get over-written with same info, if multiple engines
	 * are present on same runlist. Required optimization will be
	 * done as part of JIRA NVGPU-4980
	 */
	runlist->nvgpu_next.runlist_pri_base = dev->next.rl_pri_base;
	runlist->nvgpu_next.chram_bar0_offset =
		g->ops.runlist.get_chram_bar0_offset(g, dev->next.rl_pri_base);
	runlist->nvgpu_next.pbdma_info = &dev->next.pbdma_info;

	nvgpu_log(g, gpu_dbg_info, "runlist[%d]: runlist_pri_base 0x%x",
		runlist->id, runlist->nvgpu_next.runlist_pri_base);
	nvgpu_log(g, gpu_dbg_info, "runlist[%d]: chram_bar0_offset 0x%x",
		runlist->id, runlist->nvgpu_next.chram_bar0_offset);

	/* Dump every PBDMA slot attached to this runlist. */
	for (pbdma_idx = 0U; pbdma_idx < PBDMA_PER_RUNLIST_SIZE; pbdma_idx++) {
		nvgpu_log(g, gpu_dbg_info,
			"runlist[%d]: pbdma_id[%d] %d pbdma_pri_base[%d] 0x%x",
			runlist->id, pbdma_idx,
			runlist->nvgpu_next.pbdma_info->pbdma_id[pbdma_idx],
			pbdma_idx,
			runlist->nvgpu_next.pbdma_info->pbdma_pri_base[pbdma_idx]);
	}

	/* Index the device by its runlist-engine ID for later lookup. */
	runlist->nvgpu_next.rl_dev_list[dev->next.rleng_id] = dev;
}
/*
 * Build the bitmask of PBDMA units serving @runlist.
 *
 * Walks the runlist's PBDMA info table and sets one bit (BIT32(id)) per
 * valid entry; slots holding NVGPU_INVALID_PBDMA_ID are skipped.
 *
 * @g is currently unused but kept for HAL-call symmetry with the other
 * runlist helpers in this file.
 */
static u32 nvgpu_runlist_get_pbdma_mask(struct gk20a *g,
		struct nvgpu_runlist *runlist)
{
	u32 pbdma_mask = 0U;
	u32 i;
	u32 pbdma_id;

	nvgpu_assert(runlist != NULL);

	for (i = 0U; i < PBDMA_PER_RUNLIST_SIZE; i++) {
		pbdma_id = runlist->nvgpu_next.pbdma_info->pbdma_id[i];
		/* Brace single-statement body per kernel/nvgpu style. */
		if (pbdma_id != NVGPU_INVALID_PBDMA_ID) {
			pbdma_mask |= BIT32(pbdma_id);
		}
	}

	return pbdma_mask;
}
/*
 * Populate engine and PBDMA bookkeeping for every active runlist:
 * for each runlist, accumulate the engine bitmask from the devices that
 * target it, capture per-engine info, and derive the PBDMA bitmask.
 * No-op on virtual GPUs.
 */
void nvgpu_next_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f)
{
	struct nvgpu_runlist *runlist;
	const struct nvgpu_device *dev;
	u32 rl_idx;
	u32 eng_idx;

	nvgpu_log_fn(g, " ");

	/* Nothing to set up on a virtual GPU. */
	if (g->is_virtual) {
		return;
	}

	for (rl_idx = 0U; rl_idx < f->num_runlists; rl_idx++) {
		runlist = &f->active_runlists[rl_idx];
		nvgpu_log(g, gpu_dbg_info, "Configuring runlist %u (%u)",
			runlist->id, rl_idx);

		for (eng_idx = 0U; eng_idx < f->num_engines; eng_idx++) {
			dev = f->active_engines[eng_idx];
			/* Skip devices belonging to other runlists. */
			if (dev->runlist_id != runlist->id) {
				continue;
			}
			runlist->eng_bitmask |= BIT32(dev->engine_id);
			nvgpu_runlist_init_engine_info(g, runlist, dev);
		}

		runlist->pbdma_bitmask =
			nvgpu_runlist_get_pbdma_mask(g, runlist);

		nvgpu_log(g, gpu_dbg_info, " Active engine bitmask: 0x%x",
			runlist->eng_bitmask);
		nvgpu_log(g, gpu_dbg_info, " PBDMA bitmask: 0x%x",
			runlist->pbdma_bitmask);
	}

	nvgpu_log_fn(g, "done");
}
#endif