Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 09:12:24 +03:00)
gpu: nvgpu: Rename struct nvgpu_runlist_info, fields in fifo
Rename struct nvgpu_runlist_info to struct nvgpu_runlist; the "info" is not
necessary. struct nvgpu_runlist is soon to be a first class object in the
nvgpu object model.

Also rename the fields runlist_info and active_runlist_info to simply
runlists and active_runlists, respectively. Again, the "info" text is just
not necessary and somewhat misleading: these structs _are_ the runlist
representations in SW; they are not merely informational.

Also add an rl_dbg() macro to print debug info specific to runlist
management, and some debug prints specifying the runlist topology for the
running chip.

Change-Id: Id9fcbdd1a7227cb5f8c75cca4abbff94fe048e49
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2470303
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: a4dc48061c
Commit: 11d3785faf
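For orientation, here is a minimal sketch of what the rename means at a call site. The struct, field, and macro names (struct nvgpu_runlist, f->active_runlists, rl_dbg()) are taken from the diff below; the wrapper function itself is hypothetical and only illustrates the before/after shape.

/* Illustrative only: the surrounding function is hypothetical. */
static void example_walk_active_runlists(struct gk20a *g)
{
	struct nvgpu_fifo *f = &g->fifo;
	struct nvgpu_runlist *runlist;	/* was: struct nvgpu_runlist_info */
	u32 i;

	for (i = 0U; i < f->num_runlists; i++) {
		/* was: runlist = &f->active_runlist_info[i]; */
		runlist = &f->active_runlists[i];

		/* rl_dbg() is the new runlist-specific debug macro. */
		rl_dbg(g, "runlist %u: pbdma_bitmask 0x%x",
			runlist->runlist_id, runlist->pbdma_bitmask);
	}
}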
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -239,7 +239,7 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
nvgpu_runlist_set_state(g, BIT32(dev->runlist_id),
RUNLIST_DISABLED);

- runlist_served_pbdmas = f->runlist_info[dev->runlist_id]->pbdma_bitmask;
+ runlist_served_pbdmas = f->runlists[dev->runlist_id]->pbdma_bitmask;

for_each_set_bit(bit, &runlist_served_pbdmas,
nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA)) {
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -55,7 +55,7 @@ int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
return 0;
}

- nvgpu_mutex_acquire(&f->runlist_info[runlist_id]->runlist_lock);
+ nvgpu_mutex_acquire(&f->runlists[runlist_id]->runlist_lock);

/* WAR for Bug 2065990 */
nvgpu_tsg_disable_sched(g, tsg);
@@ -83,7 +83,7 @@ int nvgpu_fifo_preempt_tsg(struct gk20a *g, struct nvgpu_tsg *tsg)
/* WAR for Bug 2065990 */
nvgpu_tsg_enable_sched(g, tsg);

- nvgpu_mutex_release(&f->runlist_info[runlist_id]->runlist_lock);
+ nvgpu_mutex_release(&f->runlists[runlist_id]->runlist_lock);

if (ret != 0) {
if (nvgpu_platform_is_silicon(g)) {
@@ -127,7 +127,7 @@ int nvgpu_preempt_poll_tsg_on_pbdma(struct gk20a *g,

tsgid = tsg->tsgid;
runlist_id = tsg->runlist_id;
- runlist_served_pbdmas = f->runlist_info[runlist_id]->pbdma_bitmask;
+ runlist_served_pbdmas = f->runlists[runlist_id]->pbdma_bitmask;

for_each_set_bit(pbdma_id_bit, &runlist_served_pbdmas,
nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA)) {
@@ -166,9 +166,9 @@ void nvgpu_fifo_preempt_runlists_for_rc(struct gk20a *g, u32 runlists_bitmask)
#endif

for (i = 0U; i < f->num_runlists; i++) {
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;

- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];

if ((BIT32(runlist->runlist_id) & runlists_bitmask) == 0U) {
continue;
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -38,12 +38,12 @@
void nvgpu_runlist_lock_active_runlists(struct gk20a *g)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;
u32 i;

nvgpu_log_info(g, "acquire runlist_lock for active runlists");
for (i = 0; i < g->fifo.num_runlists; i++) {
- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];
nvgpu_mutex_acquire(&runlist->runlist_lock);
}
}
@@ -51,18 +51,18 @@ void nvgpu_runlist_lock_active_runlists(struct gk20a *g)
void nvgpu_runlist_unlock_active_runlists(struct gk20a *g)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;
u32 i;

nvgpu_log_info(g, "release runlist_lock for active runlists");
for (i = 0; i < g->fifo.num_runlists; i++) {
- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];
nvgpu_mutex_release(&runlist->runlist_lock);
}
}

static u32 nvgpu_runlist_append_tsg(struct gk20a *g,
- struct nvgpu_runlist_info *runlist,
+ struct nvgpu_runlist *runlist,
u32 **runlist_entry,
u32 *entries_left,
struct nvgpu_tsg *tsg)
@@ -129,7 +129,7 @@ static u32 nvgpu_runlist_append_tsg(struct gk20a *g,

static u32 nvgpu_runlist_append_prio(struct nvgpu_fifo *f,
- struct nvgpu_runlist_info *runlist,
+ struct nvgpu_runlist *runlist,
u32 **runlist_entry,
u32 *entries_left,
u32 interleave_level)
@@ -157,7 +157,7 @@ static u32 nvgpu_runlist_append_prio(struct nvgpu_fifo *f,
}

static u32 nvgpu_runlist_append_hi(struct nvgpu_fifo *f,
- struct nvgpu_runlist_info *runlist,
+ struct nvgpu_runlist *runlist,
u32 **runlist_entry,
u32 *entries_left)
{
@@ -173,7 +173,7 @@ static u32 nvgpu_runlist_append_hi(struct nvgpu_fifo *f,
}

static u32 nvgpu_runlist_append_med(struct nvgpu_fifo *f,
- struct nvgpu_runlist_info *runlist,
+ struct nvgpu_runlist *runlist,
u32 **runlist_entry,
u32 *entries_left)
{
@@ -212,7 +212,7 @@ static u32 nvgpu_runlist_append_med(struct nvgpu_fifo *f,
}

static u32 nvgpu_runlist_append_low(struct nvgpu_fifo *f,
- struct nvgpu_runlist_info *runlist,
+ struct nvgpu_runlist *runlist,
u32 **runlist_entry,
u32 *entries_left)
{
@@ -272,7 +272,7 @@ static u32 nvgpu_runlist_append_low(struct nvgpu_fifo *f,
}

static u32 nvgpu_runlist_append_flat(struct nvgpu_fifo *f,
- struct nvgpu_runlist_info *runlist,
+ struct nvgpu_runlist *runlist,
u32 **runlist_entry,
u32 *entries_left)
{
@@ -297,7 +297,7 @@ static u32 nvgpu_runlist_append_flat(struct nvgpu_fifo *f,
}

u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
- struct nvgpu_runlist_info *runlist,
+ struct nvgpu_runlist *runlist,
u32 buf_id,
u32 max_entries)
{
@@ -323,10 +323,10 @@ static bool gk20a_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
struct nvgpu_channel *ch, bool add)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist = NULL;
+ struct nvgpu_runlist *runlist = NULL;
struct nvgpu_tsg *tsg = NULL;

- runlist = f->runlist_info[runlist_id];
+ runlist = f->runlists[runlist_id];
tsg = nvgpu_tsg_from_ch(ch);

if (tsg == NULL) {
@@ -372,9 +372,9 @@ static int gk20a_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
u32 buf_id, bool add_entries)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist = NULL;
+ struct nvgpu_runlist *runlist = NULL;

- runlist = f->runlist_info[runlist_id];
+ runlist = f->runlists[runlist_id];

nvgpu_log_info(g, "runlist_id : %d, switch to new buffer 0x%16llx",
runlist_id, (u64)nvgpu_mem_get_addr(g, &runlist->mem[buf_id]));
@@ -408,7 +408,7 @@ int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
{
int ret = 0;
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist = NULL;
+ struct nvgpu_runlist *runlist = NULL;
u32 buf_id;
bool add_entries;

@@ -426,7 +426,7 @@ int nvgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
add_entries = add;
}

- runlist = f->runlist_info[runlist_id];
+ runlist = f->runlists[runlist_id];
/* double buffering, swap to next */
buf_id = (runlist->cur_buffer == 0U) ? 1U : 0U;
@@ -464,14 +464,14 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
bool wait_preempt)
{
struct gk20a *g = ch->g;
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;
#ifdef CONFIG_NVGPU_LS_PMU
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
int mutex_ret = 0;
#endif
int ret = 0;

- runlist = g->fifo.runlist_info[ch->runlist_id];
+ runlist = g->fifo.runlists[ch->runlist_id];
if (nvgpu_mutex_tryacquire(&runlist->runlist_lock) == 0) {
return -EBUSY;
}
@@ -516,7 +516,7 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,
struct nvgpu_channel *ch,
bool add, bool wait_for_finish)
{
- struct nvgpu_runlist_info *runlist = NULL;
+ struct nvgpu_runlist *runlist = NULL;
struct nvgpu_fifo *f = &g->fifo;
#ifdef CONFIG_NVGPU_LS_PMU
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
@@ -526,7 +526,7 @@ static int nvgpu_runlist_update(struct gk20a *g, u32 runlist_id,

nvgpu_log_fn(g, " ");

- runlist = f->runlist_info[runlist_id];
+ runlist = f->runlists[runlist_id];

nvgpu_mutex_acquire(&runlist->runlist_lock);
#ifdef CONFIG_NVGPU_LS_PMU
@@ -647,16 +647,16 @@ void nvgpu_runlist_cleanup_sw(struct gk20a *g)
{
struct nvgpu_fifo *f = &g->fifo;
u32 i, j;
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;

- if ((f->runlist_info == NULL) || (f->active_runlist_info == NULL)) {
+ if ((f->runlists == NULL) || (f->active_runlists == NULL)) {
return;
}

g = f->g;

for (i = 0; i < f->num_runlists; i++) {
- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];
for (j = 0; j < MAX_RUNLIST_BUFFERS; j++) {
nvgpu_dma_free(g, &runlist->mem[j]);
}
@@ -668,20 +668,20 @@ void nvgpu_runlist_cleanup_sw(struct gk20a *g)
runlist->active_tsgs = NULL;

nvgpu_mutex_destroy(&runlist->runlist_lock);
- f->runlist_info[runlist->runlist_id] = NULL;
+ f->runlists[runlist->runlist_id] = NULL;
}

- nvgpu_kfree(g, f->active_runlist_info);
- f->active_runlist_info = NULL;
+ nvgpu_kfree(g, f->active_runlists);
+ f->active_runlists = NULL;
f->num_runlists = 0;
- nvgpu_kfree(g, f->runlist_info);
- f->runlist_info = NULL;
+ nvgpu_kfree(g, f->runlists);
+ f->runlists = NULL;
f->max_runlists = 0;
}

void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f)
{
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;
const struct nvgpu_device *dev;
u32 i, j;

@@ -692,7 +692,7 @@ void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f)
}

for (i = 0; i < f->num_runlists; i++) {
- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];

(void) g->ops.fifo.find_pbdma_for_runlist(g,
runlist->runlist_id,
@@ -716,32 +716,34 @@ void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f)

static int nvgpu_init_active_runlist_mapping(struct gk20a *g)
{
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;
struct nvgpu_fifo *f = &g->fifo;
unsigned int runlist_id;
size_t runlist_size;
u32 i, j;
int err = 0;

+ rl_dbg(g, "Building active runlist map.");

/*
* In most case we want to loop through active runlists only. Here
* we need to loop through all possible runlists, to build the mapping
- * between runlist_info[runlist_id] and active_runlist_info[i].
+ * between runlists[runlist_id] and active_runlists[i].
*/
i = 0U;
for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
if (!nvgpu_engine_is_valid_runlist_id(g, runlist_id)) {
/* skip inactive runlist */
- nvgpu_log(g, gpu_dbg_info, "Skipping invalid runlist: %d", runlist_id);
+ rl_dbg(g, " Skipping invalid runlist: %d", runlist_id);
continue;
}

- nvgpu_log(g, gpu_dbg_info, "Configuring runlist: %d", runlist_id);
+ rl_dbg(g, " Configuring HW runlist: %u", runlist_id);
+ rl_dbg(g, " SW runlist index to HW: %u -> %u", i, runlist_id);

- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];
runlist->runlist_id = runlist_id;
- f->runlist_info[runlist_id] = runlist;
+ f->runlists[runlist_id] = runlist;
i = nvgpu_safe_add_u32(i, 1U);

runlist->active_channels =
@@ -762,8 +764,8 @@ static int nvgpu_init_active_runlist_mapping(struct gk20a *g)

runlist_size = (size_t)f->runlist_entry_size *
(size_t)f->num_runlist_entries;
- nvgpu_log(g, gpu_dbg_info, " RL entries: %d", f->num_runlist_entries);
- nvgpu_log(g, gpu_dbg_info, " RL size %zu", runlist_size);
+ rl_dbg(g, " RL entries: %d", f->num_runlist_entries);
+ rl_dbg(g, " RL size %zu", runlist_size);

for (j = 0; j < MAX_RUNLIST_BUFFERS; j++) {
err = nvgpu_dma_alloc_flags_sys(g,
@@ -800,16 +802,17 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
unsigned int runlist_id;
int err = 0;

- nvgpu_log_fn(g, " ");
+ rl_dbg(g, "Initializing Runlists");

nvgpu_spinlock_init(&f->runlist_submit_lock);

f->runlist_entry_size = g->ops.runlist.entry_size(g);
f->num_runlist_entries = g->ops.runlist.length_max(g);
f->max_runlists = g->ops.runlist.count_max(g);
- f->runlist_info = nvgpu_kzalloc(g, nvgpu_safe_mult_u64(
- sizeof(*f->runlist_info), f->max_runlists));
- if (f->runlist_info == NULL) {
+ f->runlists = nvgpu_kzalloc(g, nvgpu_safe_mult_u64(
+ sizeof(*f->runlists), f->max_runlists));
+ if (f->runlists == NULL) {
err = -ENOMEM;
goto clean_up_runlist;
}
@@ -817,18 +820,22 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
if (nvgpu_engine_is_valid_runlist_id(g, runlist_id)) {
num_runlists = nvgpu_safe_add_u32(num_runlists, 1U);
- nvgpu_log(g, gpu_dbg_info, "Valid runlist: %d", runlist_id);
}
}
f->num_runlists = num_runlists;

- f->active_runlist_info = nvgpu_kzalloc(g, nvgpu_safe_mult_u64(
- sizeof(*f->active_runlist_info), num_runlists));
- if (f->active_runlist_info == NULL) {
+ f->active_runlists = nvgpu_kzalloc(g, nvgpu_safe_mult_u64(
+ sizeof(*f->active_runlists), num_runlists));
+ if (f->active_runlists == NULL) {
err = -ENOMEM;
goto clean_up_runlist;
}
- nvgpu_log(g, gpu_dbg_info, "num_runlists: %u", num_runlists);

+ rl_dbg(g, " Max runlists: %u", f->max_runlists);
+ rl_dbg(g, " Active runlists: %u", f->num_runlists);
+ rl_dbg(g, " RL entry size: %u bytes", f->runlist_entry_size);
+ rl_dbg(g, " Max RL entries: %u", f->num_runlist_entries);

err = nvgpu_init_active_runlist_mapping(g);
if (err != 0) {
@@ -836,13 +843,11 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
}

g->ops.runlist.init_enginfo(g, f);

- nvgpu_log_fn(g, "done");
return 0;

clean_up_runlist:
nvgpu_runlist_cleanup_sw(g);
- nvgpu_log_fn(g, "fail");
+ rl_dbg(g, "fail");
return err;
}
@@ -851,7 +856,7 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
{
u32 i, runlists_mask = 0;
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;

bool bitmask_disabled = ((act_eng_bitmask == 0U) &&
(pbdma_bitmask == 0U));
@@ -859,7 +864,7 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
/* engine and/or pbdma ids are known */
if (!bitmask_disabled) {
for (i = 0U; i < f->num_runlists; i++) {
- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];

if ((runlist->eng_bitmask & act_eng_bitmask) != 0U) {
runlists_mask |= BIT32(runlist->runlist_id);
@@ -883,7 +888,7 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
"and pbdma ids are unknown");

for (i = 0U; i < f->num_runlists; i++) {
- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];

runlists_mask |= BIT32(runlist->runlist_id);
}
@@ -900,14 +905,14 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
void nvgpu_runlist_unlock_runlists(struct gk20a *g, u32 runlists_mask)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;
u32 i;

nvgpu_log_info(g, "release runlist_lock for runlists set in "
"runlists_mask: 0x%08x", runlists_mask);

for (i = 0U; i < f->num_runlists; i++) {
- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];

if ((BIT32(i) & runlists_mask) != 0U) {
nvgpu_mutex_release(&runlist->runlist_lock);
@@ -73,11 +73,11 @@ static bool nvgpu_tsg_is_channel_active(struct gk20a *g,
struct nvgpu_channel *ch)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;
unsigned int i;

for (i = 0; i < f->num_runlists; ++i) {
- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];
if (nvgpu_test_bit(ch->chid, runlist->active_channels)) {
return true;
}
@@ -1,7 +1,7 @@
/*
* Virtualized GPU Runlist
*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -77,9 +77,9 @@ static bool vgpu_runlist_modify_active_locked(struct gk20a *g, u32 runlist_id,
struct nvgpu_channel *ch, bool add)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;

- runlist = f->runlist_info[runlist_id];
+ runlist = f->runlists[runlist_id];

if (add) {
if (nvgpu_test_and_set_bit(ch->chid,
@@ -102,9 +102,9 @@ static void vgpu_runlist_reconstruct_locked(struct gk20a *g, u32 runlist_id,
bool add_entries)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;

- runlist = f->runlist_info[runlist_id];
+ runlist = f->runlists[runlist_id];

if (add_entries) {
u16 *runlist_entry;
@@ -132,7 +132,7 @@ static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
bool wait_for_finish)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;
bool add_entries;

nvgpu_log_fn(g, " ");
@@ -151,7 +151,7 @@ static int vgpu_runlist_update_locked(struct gk20a *g, u32 runlist_id,
add_entries = add;
}

- runlist = f->runlist_info[runlist_id];
+ runlist = f->runlists[runlist_id];

vgpu_runlist_reconstruct_locked(g, runlist_id, add_entries);

@@ -167,13 +167,13 @@ static int vgpu_runlist_update(struct gk20a *g, u32 runlist_id,
struct nvgpu_channel *ch,
bool add, bool wait_for_finish)
{
- struct nvgpu_runlist_info *runlist = NULL;
+ struct nvgpu_runlist *runlist = NULL;
struct nvgpu_fifo *f = &g->fifo;
u32 ret = 0;

nvgpu_log_fn(g, " ");

- runlist = f->runlist_info[runlist_id];
+ runlist = f->runlists[runlist_id];

nvgpu_mutex_acquire(&runlist->runlist_lock);
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -331,8 +331,8 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,

nvgpu_log_info(g, "Check preempt pending for tsgid = %u", tsgid);

- runlist_served_pbdmas = f->runlist_info[runlist_id]->pbdma_bitmask;
- runlist_served_engines = f->runlist_info[runlist_id]->eng_bitmask;
+ runlist_served_pbdmas = f->runlists[runlist_id]->pbdma_bitmask;
+ runlist_served_engines = f->runlists[runlist_id]->eng_bitmask;

for_each_set_bit(bit, &runlist_served_pbdmas,
nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA)) {
@@ -344,13 +344,13 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
}
}

- f->runlist_info[runlist_id]->reset_eng_bitmask = 0U;
+ f->runlists[runlist_id]->reset_eng_bitmask = 0U;

for_each_set_bit(bit, &runlist_served_engines, f->max_engines) {
engine_id = U32(bit);
err = gv11b_fifo_preempt_poll_eng(g,
tsgid, engine_id,
- &f->runlist_info[runlist_id]->reset_eng_bitmask);
+ &f->runlists[runlist_id]->reset_eng_bitmask);
if ((err != 0) && (ret == 0)) {
ret = err;
}
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -52,8 +52,8 @@ int gk20a_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
bool wait_preempt)
{
struct gk20a *g = ch->g;
- struct nvgpu_runlist_info *runlist =
- g->fifo.runlist_info[ch->runlist_id];
+ struct nvgpu_runlist *runlist =
+ g->fifo.runlists[ch->runlist_id];
int ret = 0;
u32 fecsstat0 = 0, fecsstat1 = 0;
u32 preempt_id;
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -41,10 +41,10 @@ u32 gk20a_runlist_length_max(struct gk20a *g)
void gk20a_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
u32 count, u32 buffer_index)
{
- struct nvgpu_runlist_info *runlist = NULL;
+ struct nvgpu_runlist *runlist = NULL;
u64 runlist_iova;

- runlist = g->fifo.runlist_info[runlist_id];
+ runlist = g->fifo.runlists[runlist_id];
runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[buffer_index]);

nvgpu_spinlock_acquire(&g->fifo.runlist_submit_lock);
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -37,11 +37,11 @@ u32 tu104_runlist_count_max(struct gk20a *g)
void tu104_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
u32 count, u32 buffer_index)
{
- struct nvgpu_runlist_info *runlist = NULL;
+ struct nvgpu_runlist *runlist = NULL;
u64 runlist_iova;
u32 runlist_iova_lo, runlist_iova_hi;

- runlist = g->fifo.runlist_info[runlist_id];
+ runlist = g->fifo.runlists[runlist_id];
runlist_iova = nvgpu_mem_get_addr(g, &runlist->mem[buffer_index]);

runlist_iova_lo = u64_lo32(runlist_iova) >>
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -56,7 +56,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_tsg *tsg = NULL;
unsigned long tsgid;
- struct nvgpu_runlist_info *runlist = NULL;
+ struct nvgpu_runlist *runlist = NULL;
#ifdef CONFIG_NVGPU_LS_PMU
u32 token = PMU_INVALID_MUTEX_OWNER_ID;
int mutex_ret = 0;
@@ -72,7 +72,7 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
PMU_MUTEX_ID_FIFO, &token);
#endif
for (i = 0U; i < f->num_runlists; i++) {
- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];

if ((runlists_mask & BIT32(runlist->runlist_id)) == 0U) {
continue;
@@ -150,7 +150,7 @@ void gv11b_fifo_recover(struct gk20a *g, u32 act_eng_bitmask,
unsigned long bit;
unsigned long bitmask;
u32 pbdma_bitmask = 0U;
- struct nvgpu_runlist_info *runlist = NULL;
+ struct nvgpu_runlist *runlist = NULL;
u32 engine_id;
struct nvgpu_fifo *f = &g->fifo;
struct nvgpu_swprofiler *prof = &f->recovery_profiler;
@@ -306,7 +306,7 @@ void gv11b_fifo_recover(struct gk20a *g, u32 act_eng_bitmask,
dbg_rec(g, "Resetting relevant engines");
/* check if engine reset should be deferred */
for (i = 0U; i < f->num_runlists; i++) {
- runlist = &f->active_runlist_info[i];
+ runlist = &f->active_runlists[i];

if (((runlists_mask & BIT32(runlist->runlist_id)) == 0U) ||
(runlist->reset_eng_bitmask == 0U)) {
@@ -1,7 +1,7 @@
/*
* FIFO common definitions.
*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -131,7 +131,7 @@
*
* TODO
*
- * + struct nvgpu_runlist_info
+ * + struct nvgpu_runlist
*
* TODO
*
@@ -225,7 +225,7 @@
#define CHANNEL_INFO_VEID0 0U

struct gk20a;
- struct nvgpu_runlist_info;
+ struct nvgpu_runlist;
struct nvgpu_channel;
struct nvgpu_tsg;
struct nvgpu_swprofiler;
@@ -277,15 +277,15 @@ struct nvgpu_fifo {

/**
* Pointers to runlists, indexed by real hw runlist_id.
- * If a runlist is active, then runlist_info[runlist_id] points
+ * If a runlist is active, then runlists[runlist_id] points
* to one entry in active_runlist_info. Otherwise, it is NULL.
*/
- struct nvgpu_runlist_info **runlist_info;
+ struct nvgpu_runlist **runlists;
/** Number of runlists supported by the h/w. */
u32 max_runlists;

- /** Array of runlists that are actually in use. */
- struct nvgpu_runlist_info *active_runlist_info;
+ /** Array of actual HW runlists that are present on the GPU. */
+ struct nvgpu_runlist *active_runlists;
/** Number of active runlists. */
u32 num_runlists;
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -74,5 +74,6 @@ enum nvgpu_log_type {
#define gpu_dbg_mig BIT(33) /* MIG info */
#define gpu_dbg_rec BIT(34) /* Recovery sequence debugging. */
#define gpu_dbg_zbc BIT(35) /* Gr ZBC */
+ #define gpu_dbg_runlists BIT(38) /* Runlist related debugging. */

#endif
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -79,7 +79,7 @@ struct nvgpu_channel;
/** Runlist identifier is invalid. */
#define NVGPU_INVALID_RUNLIST_ID U32_MAX

- struct nvgpu_runlist_info {
+ struct nvgpu_runlist {
/** Runlist identifier. */
u32 runlist_id;
/** Bitmap of active channels in the runlist. One bit per chid. */
@@ -104,7 +104,7 @@ struct nvgpu_runlist_info {
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
#if defined(CONFIG_NVGPU_NON_FUSA) && defined(CONFIG_NVGPU_NEXT)
/* nvgpu next runlist info additions */
- struct nvgpu_next_runlist_info nvgpu_next;
+ struct nvgpu_next_runlist nvgpu_next;
#endif
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */
};
@@ -128,7 +128,7 @@ struct nvgpu_runlist_info {
* runlist buffer to describe all active channels and TSGs.
*/
u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
- struct nvgpu_runlist_info *runlist,
+ struct nvgpu_runlist *runlist,
u32 buf_id, u32 max_entries);

/**
@@ -365,4 +365,8 @@ u32 nvgpu_runlist_get_runlists_mask(struct gk20a *g, u32 id,
*/
void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f);
/** @endcond DOXYGEN_SHOULD_SKIP_THIS */

+ #define rl_dbg(g, fmt, arg...) \
+ nvgpu_log(g, gpu_dbg_runlists, "RL | " fmt, ##arg)

#endif /* NVGPU_RUNLIST_H */
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2017-2020 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2017-2021 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -64,7 +64,7 @@ static int gk20a_fifo_sched_debugfs_seq_show(
struct nvgpu_tsg *tsg = NULL;

const struct nvgpu_device *dev;
- struct nvgpu_runlist_info *runlist;
+ struct nvgpu_runlist *runlist;
u32 runlist_id;
int ret = SEQ_SKIP;

@@ -72,7 +72,7 @@ static int gk20a_fifo_sched_debugfs_seq_show(
nvgpu_assert(dev != NULL);

runlist_id = dev->runlist_id;
- runlist = f->runlist_info[runlist_id];
+ runlist = f->runlists[runlist_id];

if (ch == f->channel) {
seq_puts(s, "chid tsgid pid timeslice timeout interleave graphics_preempt compute_preempt\n");
@@ -369,7 +369,7 @@ int test_gv11b_fifo_is_preempt_pending(struct unit_module *m, struct gk20a *g,
u32 ctx_stat = 0U;
u32 id = 0U, next_id = 0U;
/* Assuming runlist_id is 0 */
- u32 runlist_served_pbdmas = g->fifo.runlist_info[0U]->pbdma_bitmask;
+ u32 runlist_served_pbdmas = g->fifo.runlists[0U]->pbdma_bitmask;

timers_fi = nvgpu_timers_get_fault_injection();
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -223,12 +223,12 @@ int test_preempt_poll_tsg_on_pbdma(struct unit_module *m, struct gk20a *g,

if (branches & F_PREEMPT_POLL_PBDMA_BUSY) {
unit_assert(stub[0].pbdma_id !=
- nvgpu_ffs(f->runlist_info[0]->pbdma_bitmask),
+ nvgpu_ffs(f->runlists[0]->pbdma_bitmask),
goto done);
} else if (!(branches & F_PREEMPT_POLL_PBDMA_NULL)) {
unit_assert(stub[0].tsgid == 0, goto done);
unit_assert(stub[0].pbdma_id ==
- nvgpu_ffs(f->runlist_info[0]->pbdma_bitmask),
+ nvgpu_ffs(f->runlists[0]->pbdma_bitmask),
goto done);
}
}
@@ -152,11 +152,11 @@ static void setup_fifo(struct gk20a *g, unsigned long *tsg_map,
unsigned long *ch_map, struct nvgpu_tsg *tsgs,
struct nvgpu_channel *chs, unsigned int num_tsgs,
unsigned int num_channels,
- struct nvgpu_runlist_info **runlists, u32 *rl_data,
+ struct nvgpu_runlist **runlists, u32 *rl_data,
bool interleave)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info *runlist = runlists[0];
+ struct nvgpu_runlist *runlist = runlists[0];

/* we only use the runlist 0 here */
runlist->mem[0].aperture = APERTURE_SYSMEM;
@@ -178,7 +178,7 @@ static void setup_fifo(struct gk20a *g, unsigned long *tsg_map,
f->tsg = tsgs;
f->channel = chs;
f->num_channels = num_channels;
- f->runlist_info = runlists;
+ f->runlists = runlists;

/*
* For testing the runlist entry order format, these simpler dual-u32
@@ -241,7 +241,7 @@ static int run_format_test(struct unit_module *m, struct nvgpu_fifo *f,
setup_tsg_multich(tsg, chs, 0, prio, 5, n_ch);

/* entry capacity: tsg header and some channels */
- n = nvgpu_runlist_construct_locked(f, f->runlist_info[0], 0, 1 + n_ch);
+ n = nvgpu_runlist_construct_locked(f, f->runlists[0], 0, 1 + n_ch);

if (n != 1 + n_ch) {
return -1;
@@ -308,8 +308,8 @@ static const char *f_runlist_format[] = {
int test_tsg_format_gen(struct unit_module *m, struct gk20a *g, void *args)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info runlist;
- struct nvgpu_runlist_info *runlists = &runlist;
+ struct nvgpu_runlist runlist;
+ struct nvgpu_runlist *runlists = &runlist;
unsigned long active_tsgs_map = 0;
unsigned long active_chs_map = 0;
struct nvgpu_tsg tsgs[1] = {{0}};
@@ -412,8 +412,8 @@ static int test_common_gen(struct unit_module *m, struct gk20a *g,
u32 *expected, u32 expect_count)
{
struct nvgpu_fifo *f = &g->fifo;
- struct nvgpu_runlist_info runlist;
- struct nvgpu_runlist_info *runlists = &runlist;
+ struct nvgpu_runlist runlist;
+ struct nvgpu_runlist *runlists = &runlist;
unsigned long active_tsgs_map = 0;
unsigned long active_chs_map = 0;
struct nvgpu_tsg tsgs[6] = {{0}};
@@ -978,8 +978,8 @@ done:
#define F_RUNLIST_SETUP_LAST BIT(6)

static const char *f_runlist_setup[] = {
- "alloc_runlist_info_fail",
- "alloc_active_runlist_info_fail",
+ "alloc_runlists_fail",
+ "alloc_active_runlists_fail",
"alloc_active_channels_fail",
"alloc_active_tsgs_fail",
"alloc_dma_flags_sys_fail",
@@ -293,7 +293,7 @@ int test_tsg_bind_channel(struct unit_module *m,
struct nvgpu_channel *chA = NULL;
struct nvgpu_channel *chB = NULL;
struct nvgpu_channel *ch = NULL;
- struct nvgpu_runlist_info *runlist = NULL;
+ struct nvgpu_runlist *runlist = NULL;
u32 branches = 0U;
int ret = UNIT_FAIL;
int err;
@@ -344,7 +344,7 @@ int test_tsg_bind_channel(struct unit_module *m,
ch->runlist_id + 1 : tsg_save.runlist_id;

/* ch already already active */
- runlist = &f->active_runlist_info[tsg->runlist_id];
+ runlist = &f->active_runlists[tsg->runlist_id];
if (branches & F_TSG_BIND_CHANNEL_ACTIVE) {
nvgpu_set_bit(ch->chid, runlist->active_channels);
} else {