gpu: nvgpu: couple runlist domains and nvs
Now that the main nvsched code exists in the nvgpu build, make it control
the runlist domains. As a new nvs domain is created, create the relevant
runlist data too. To support the default domain, create a default nvs
domain at boot.

The scheduling domain code owns the responsibility of domain lifetime, and
runlist domains exist to serve that logic, although the RL domains are
directly used by channel and TSG logic. Add refcounting at the scheduler
uapi level to make sure that busy domains (those that still have TSG
participants) do not get removed too early.

Adjust error-injection-sensitive unit tests to match the updated logic.

Jira NVGPU-6425
Jira NVGPU-6427

Change-Id: I1beec97c54c60ad334165b1c0acb5e827c24f2ac
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2632287
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
committed by mobile promotions
parent 1d14a4412f
commit 632644b44a
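The refcount rule described above is small enough to model on its own. The
following is a minimal, self-contained userspace sketch of that rule, not
driver code: the toy_* names are invented for illustration, and the real
implementation guards the counter with g->sched_mutex as the diff below
shows.

/*
 * Toy model of the commit's domain refcount rule: creation takes the
 * primary reference (owned by userspace via the ioctl layer), each bound
 * TSG takes one more, and deletion is refused with -EBUSY while any TSG
 * still participates.
 */
#include <errno.h>
#include <stdio.h>

struct toy_domain {
	unsigned int ref; /* primary ref + one per bound TSG */
};

static void toy_domain_create(struct toy_domain *dom)
{
	dom->ref = 1U; /* primary owner: the uapi layer */
}

static void toy_tsg_bind(struct toy_domain *dom)
{
	dom->ref++; /* a TSG participant keeps the domain alive */
}

static void toy_tsg_unbind(struct toy_domain *dom)
{
	dom->ref--; /* never frees; only deletion drops the primary ref */
}

static int toy_domain_delete(struct toy_domain *dom)
{
	if (dom->ref != 1U)
		return -EBUSY; /* a TSG is still bound */
	dom->ref = 0U;
	return 0;
}

int main(void)
{
	struct toy_domain dom;

	toy_domain_create(&dom);
	toy_tsg_bind(&dom);
	printf("delete while bound: %d\n", toy_domain_delete(&dom)); /* -EBUSY */
	toy_tsg_unbind(&dom);
	printf("delete when idle: %d\n", toy_domain_delete(&dom)); /* 0 */
	return 0;
}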
@@ -663,6 +663,14 @@ int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
 		return -EINVAL;
 	}
 
+	if (tsg->rl_domain == NULL) {
+		/*
+		 * "Success" case because the TSG is not participating in
+		 * scheduling at the moment, so there is nothing to be done.
+		 */
+		return 0;
+	}
+
 	return nvgpu_runlist_do_update(g, rl, tsg->rl_domain, ch, add, wait_for_finish);
 }
@@ -1146,10 +1154,33 @@ static void nvgpu_init_active_runlist_mapping(struct gk20a *g)
 	}
 }
 
+static int nvgpu_runlist_alloc_default_domain(struct gk20a *g)
+{
+#ifndef CONFIG_NVS_PRESENT
+	struct nvgpu_fifo *f = &g->fifo;
+	u32 i;
+
+	for (i = 0; i < g->fifo.num_runlists; i++) {
+		struct nvgpu_runlist *runlist = &f->active_runlists[i];
+
+		runlist->domain = nvgpu_runlist_domain_alloc(g, runlist, "(default)");
+		if (runlist->domain == NULL) {
+			nvgpu_err(g, "memory allocation failed");
+			/*
+			 * deletion of prior domains happens in
+			 * nvgpu_runlist_cleanup_sw() via the caller.
+			 */
+			return -ENOMEM;
+		}
+	}
+#endif
+	return 0;
+}
+
 int nvgpu_runlist_setup_sw(struct gk20a *g)
 {
 	struct nvgpu_fifo *f = &g->fifo;
-	u32 num_runlists = 0U, i;
+	u32 num_runlists = 0U;
 	unsigned int runlist_id;
 	int err = 0;
 
@@ -1190,17 +1221,10 @@ int nvgpu_runlist_setup_sw(struct gk20a *g)
 
 	nvgpu_init_active_runlist_mapping(g);
 
-	for (i = 0; i < g->fifo.num_runlists; i++) {
-		struct nvgpu_runlist *runlist = &f->active_runlists[i];
-
-		runlist->domain = nvgpu_runlist_domain_alloc(g, runlist, "(default)");
-		if (runlist->domain == NULL) {
-			nvgpu_err(g, "memory allocation failed");
-			err = -ENOMEM;
-			goto clean_up_runlist;
-		}
-	}
-
+	err = nvgpu_runlist_alloc_default_domain(g);
+	if (err != 0) {
+		goto clean_up_runlist;
+	}
 
 	g->ops.runlist.init_enginfo(g, f);
 	return 0;
@@ -33,6 +33,7 @@
 #include <nvgpu/gr/config.h>
 #include <nvgpu/gr/ctx.h>
 #include <nvgpu/runlist.h>
+#include <nvgpu/nvs.h>
 #include <nvgpu/static_analysis.h>
 #include <nvgpu/nvgpu_init.h>
 #ifdef CONFIG_NVGPU_PROFILER
@@ -156,7 +157,8 @@ int nvgpu_tsg_bind_channel(struct nvgpu_tsg *tsg, struct nvgpu_channel *ch)
 #ifdef CONFIG_NVS_PRESENT
 int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name)
 {
-	struct nvgpu_runlist_domain *domain;
+	struct nvgpu_runlist_domain *rl_domain;
+	struct nvgpu_nvs_domain *nvs_domain;
 	struct gk20a *g = tsg->g;
 
 	/* Hopping channels from one domain to another is not allowed */
@@ -164,16 +166,28 @@ int nvgpu_tsg_bind_domain(struct nvgpu_tsg *tsg, const char *domain_name)
 		return -EINVAL;
 	}
 
+	nvs_domain = nvgpu_nvs_domain_get(g, domain_name);
+	if (nvs_domain == NULL) {
+		return -ENOENT;
+	}
+
 	/*
 	 * The domain ptr will get updated with the right id once the runlist
 	 * gets specified based on the first channel.
 	 */
-	domain = nvgpu_rl_domain_get(g, 0, domain_name);
-	if (domain == NULL) {
+	rl_domain = nvgpu_rl_domain_get(g, 0, domain_name);
+	if (rl_domain == NULL) {
+		/*
+		 * This shouldn't happen because the nvs domain guarantees RL domains.
+		 *
+		 * TODO: query this via the nvs domain.
+		 */
+		nvgpu_nvs_domain_put(g, nvs_domain);
 		return -ENOENT;
 	}
 
-	tsg->rl_domain = domain;
+	tsg->rl_domain = rl_domain;
+	tsg->nvs_domain = nvs_domain;
 
 	return 0;
 }
@@ -844,6 +858,7 @@ int nvgpu_tsg_open_common(struct gk20a *g, struct nvgpu_tsg *tsg, pid_t pid)
 	 * gets specified based on the first channel.
 	 */
 	tsg->rl_domain = nvgpu_rl_domain_get(g, 0, "(default)");
+	tsg->nvs_domain = nvgpu_nvs_domain_get(g, "(default)");
 #ifdef CONFIG_NVGPU_DEBUGGER
 	tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;
 #endif
@@ -926,6 +941,12 @@ void nvgpu_tsg_release_common(struct gk20a *g, struct nvgpu_tsg *tsg)
 	}
 #endif
 
+	if (tsg->nvs_domain != NULL) {
+		nvgpu_nvs_domain_put(g, tsg->nvs_domain);
+		tsg->nvs_domain = NULL;
+		tsg->rl_domain = NULL;
+	}
+
 	if (tsg->vm != NULL) {
 		nvgpu_vm_put(tsg->vm);
 		tsg->vm = NULL;
@@ -52,9 +52,7 @@
 #include <nvgpu/cic_mon.h>
 #include <nvgpu/cic_rm.h>
 #include <nvgpu/fbp.h>
-#ifdef CONFIG_NVS_PRESENT
 #include <nvgpu/nvs.h>
-#endif
 
 #ifdef CONFIG_NVGPU_LS_PMU
 #include <nvgpu/pmu/pmu_pstate.h>
@@ -858,6 +856,7 @@ int nvgpu_finalize_poweron(struct gk20a *g)
 
 		NVGPU_INIT_TABLE_ENTRY(g->ops.mm.init_mm_support, NO_FLAG),
 		NVGPU_INIT_TABLE_ENTRY(g->ops.fifo.fifo_init_support, NO_FLAG),
+		NVGPU_INIT_TABLE_ENTRY(nvgpu_nvs_init, NO_FLAG),
 		NVGPU_INIT_TABLE_ENTRY(g->ops.therm.elcg_init_idle_filters,
 				NO_FLAG),
 		NVGPU_INIT_TABLE_ENTRY(&nvgpu_netlist_init_ctx_vars, NO_FLAG),
@@ -923,9 +922,6 @@ int nvgpu_finalize_poweron(struct gk20a *g)
 #if defined(CONFIG_NVGPU_GSP_SCHEDULER) || defined(CONFIG_NVGPU_GSP_STRESS_TEST)
 		/* Init gsp ops */
 		NVGPU_INIT_TABLE_ENTRY(&nvgpu_gsp_sw_init, NO_FLAG),
 #endif
-#ifdef CONFIG_NVS_PRESENT
-		NVGPU_INIT_TABLE_ENTRY(nvgpu_nvs_init, NO_FLAG),
-#endif
 	};
 	size_t i;
@@ -26,20 +26,32 @@
 #include <nvgpu/nvs.h>
 #include <nvgpu/kmem.h>
 #include <nvgpu/gk20a.h>
+#include <nvgpu/runlist.h>
 
 static struct nvs_sched_ops nvgpu_nvs_ops = {
 	.preempt = NULL,
 	.recover = NULL,
 };
 
-/**
- * Init call to prepare the scheduler mutex. We won't actually allocate a
- * scheduler until someone opens the scheduler node.
- */
 int nvgpu_nvs_init(struct gk20a *g)
 {
+	struct nvgpu_nvs_domain *domain;
+	int err;
+
 	nvgpu_mutex_init(&g->sched_mutex);
 
+	err = nvgpu_nvs_open(g);
+	if (err != 0) {
+		return err;
+	}
+
+	if (nvgpu_rl_domain_get(g, 0, "(default)") == NULL) {
+		int err = nvgpu_nvs_add_domain(g, "(default)", 1000*1000, 10*1000, &domain);
+		if (err != 0) {
+			return err;
+		}
+	}
+
 	return 0;
 }
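The init path above creates the "(default)" domain only when a lookup
misses, so a repeated init stays idempotent. Below is a minimal sketch of
that pattern with invented names; a fixed-size string table stands in for
the real domain list, and sched_init() mirrors the lookup-then-create check
in nvgpu_nvs_init().

#include <stdio.h>
#include <string.h>

#define MAX_DOMAINS 4

static const char *domains[MAX_DOMAINS];
static int num_domains;

static const char *domain_lookup(const char *name)
{
	int i;

	for (i = 0; i < num_domains; i++) {
		if (strcmp(domains[i], name) == 0)
			return domains[i];
	}
	return NULL;
}

static int domain_create(const char *name)
{
	if (num_domains == MAX_DOMAINS)
		return -1;
	domains[num_domains++] = name;
	return 0;
}

/* mirrors: if (nvgpu_rl_domain_get(g, 0, "(default)") == NULL) add it */
static int sched_init(void)
{
	if (domain_lookup("(default)") == NULL)
		return domain_create("(default)");
	return 0;
}

int main(void)
{
	sched_init();
	sched_init(); /* second call finds the domain and does nothing */
	printf("domains: %d\n", num_domains); /* prints 1 */
	return 0;
}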
@@ -121,6 +133,7 @@ int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice,
 	}
 
 	nvgpu_dom->id = nvgpu_nvs_new_id(g);
+	nvgpu_dom->ref = 1U;
 
 	nvs_dom = nvs_domain_create(g->scheduler->sched, name,
 				timeslice, preempt_grace, nvgpu_dom);
@@ -131,6 +144,14 @@ int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u32 timeslice,
 		goto unlock;
 	}
 
+	err = nvgpu_rl_domain_alloc(g, name);
+	if (err != 0) {
+		nvs_domain_destroy(g->scheduler->sched, nvs_dom);
+		nvgpu_kfree(g, nvgpu_dom);
+		return err;
+	}
+
+
 	nvgpu_dom->parent = nvs_dom;
 
 	*pdomain = nvgpu_dom;
@@ -155,6 +176,36 @@ nvgpu_nvs_get_dom_by_id(struct gk20a *g, struct nvs_sched *sched, u64 dom_id)
 	return NULL;
 }
 
+struct nvgpu_nvs_domain *
+nvgpu_nvs_domain_get(struct gk20a *g, const char *name)
+{
+	struct nvs_domain *nvs_dom;
+	struct nvgpu_nvs_domain *dom = NULL;
+	struct nvgpu_nvs_scheduler *sched = g->scheduler;
+
+	nvgpu_mutex_acquire(&g->sched_mutex);
+
+	nvs_dom = nvs_domain_by_name(sched->sched, name);
+	if (nvs_dom == NULL) {
+		goto unlock;
+	}
+
+	dom = nvs_dom->priv;
+	dom->ref++;
+
+unlock:
+	nvgpu_mutex_release(&g->sched_mutex);
+	return dom;
+}
+
+void nvgpu_nvs_domain_put(struct gk20a *g, struct nvgpu_nvs_domain *dom)
+{
+	nvgpu_mutex_acquire(&g->sched_mutex);
+	dom->ref--;
+	WARN_ON(dom->ref == 0U);
+	nvgpu_mutex_release(&g->sched_mutex);
+}
+
 int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id)
 {
 	struct nvgpu_nvs_domain *nvgpu_dom;
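Note that nvgpu_nvs_domain_get() takes the reference under the same mutex
that protects the name lookup; otherwise a concurrent
nvgpu_nvs_del_domain() could tear the domain down between the lookup and
the ref++. A compilable pthreads sketch of that ordering follows (toy_*
names are invented; the kernel code uses nvgpu_mutex_* primitives instead):

#include <pthread.h>
#include <stddef.h>

struct toy_dom {
	unsigned int ref;
};

static pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct toy_dom *the_dom; /* stands in for nvs_domain_by_name() */

static struct toy_dom *toy_get(void)
{
	struct toy_dom *dom;

	pthread_mutex_lock(&sched_mutex);
	dom = the_dom;      /* lookup ... */
	if (dom != NULL)
		dom->ref++; /* ... and ref, atomic w.r.t. delete */
	pthread_mutex_unlock(&sched_mutex);
	return dom;
}

static void toy_put(struct toy_dom *dom)
{
	pthread_mutex_lock(&sched_mutex);
	dom->ref--; /* never frees, mirroring nvgpu_nvs_domain_put() */
	pthread_mutex_unlock(&sched_mutex);
}

int main(void)
{
	struct toy_dom dom = { .ref = 1U };
	struct toy_dom *d;

	the_dom = &dom;
	d = toy_get();
	if (d != NULL)
		toy_put(d);
	return 0;
}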
@@ -168,12 +219,32 @@ int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id)
 	nvgpu_dom = nvgpu_nvs_get_dom_by_id(g, g->scheduler->sched, dom_id);
 	if (nvgpu_dom == NULL) {
 		nvs_dbg(g, "domain %llu does not exist!", dom_id);
-		err = -EINVAL;
+		err = -ENOENT;
 		goto unlock;
 	}
 
+	if (nvgpu_dom->ref != 1U) {
+		nvs_dbg(g, "domain %llu is still in use! refs: %u",
+			dom_id, nvgpu_dom->ref);
+		err = -EBUSY;
+		goto unlock;
+	}
+
 	nvs_dom = nvgpu_dom->parent;
 
+	err = nvgpu_rl_domain_delete(g, nvs_dom->name);
+	if (err != 0) {
+		nvs_dbg(g, "failed to delete RL domains on %llu!", dom_id);
+		/*
+		 * The RL domains require the existence of at least one domain;
+		 * this path inherits that logic until it's been made more
+		 * flexible.
+		 */
+		goto unlock;
+	}
+
+	nvgpu_dom->ref = 0U;
+
 	nvs_domain_destroy(g->scheduler->sched, nvs_dom);
 	nvgpu_kfree(g, nvgpu_dom);
@@ -38,6 +38,7 @@
 #include <nvgpu/nvgpu_init.h>
 #include <nvgpu/device.h>
 #include <nvgpu/fb.h>
+#include <nvgpu/nvs.h>
 
 #include "init_vgpu.h"
 #include "hal/vgpu/init/init_hal_vgpu.h"
@@ -228,6 +229,12 @@ int vgpu_finalize_poweron_common(struct gk20a *g)
 		return err;
 	}
 
+	err = nvgpu_nvs_init(g);
+	if (err != 0) {
+		nvgpu_err(g, "failed to init gk20a nvs");
+		return err;
+	}
+
 	err = vgpu_fbp_init_support(g);
 	if (err != 0) {
 		nvgpu_err(g, "failed to init gk20a fbp");
@@ -23,7 +23,9 @@
 #ifndef NVGPU_NVS_H
 #define NVGPU_NVS_H
 
+#ifdef CONFIG_NVS_PRESENT
 #include <nvs/domain.h>
+#endif
 
 #include <nvgpu/atomic.h>
 #include <nvgpu/lock.h>
@@ -52,6 +54,15 @@ struct nvgpu_nvs_domain {
 	 * Convenience pointer for linking back to the parent object.
 	 */
 	struct nvs_domain *parent;
+
+	/*
+	 * Domains are dynamically used by their participant TSGs and the
+	 * runlist HW. A refcount prevents them from getting prematurely freed.
+	 *
+	 * This is not the usual refcount. The primary owner is userspace via the
+	 * ioctl layer and a TSG putting a ref does not result in domain deletion.
+	 */
+	u32 ref;
 };
 
 struct nvgpu_nvs_scheduler {
@@ -59,6 +70,7 @@ struct nvgpu_nvs_scheduler {
 	nvgpu_atomic64_t id_counter;
 };
 
+#ifdef CONFIG_NVS_PRESENT
 int nvgpu_nvs_init(struct gk20a *g);
 int nvgpu_nvs_open(struct gk20a *g);
 void nvgpu_nvs_get_log(struct gk20a *g, s64 *timestamp, const char **msg);
@@ -70,10 +82,28 @@ struct nvgpu_nvs_domain *
 nvgpu_nvs_get_dom_by_id(struct gk20a *g, struct nvs_sched *sched, u64 dom_id);
 void nvgpu_nvs_print_domain(struct gk20a *g, struct nvgpu_nvs_domain *domain);
 
+struct nvgpu_nvs_domain *
+nvgpu_nvs_domain_get(struct gk20a *g, const char *name);
+void nvgpu_nvs_domain_put(struct gk20a *g, struct nvgpu_nvs_domain *dom);
+
 /*
  * Debug wrapper for NVS code.
  */
 #define nvs_dbg(g, fmt, arg...)			\
 	nvgpu_log(g, gpu_dbg_nvs, fmt, ##arg)
+#else
+static inline int nvgpu_nvs_init(struct gk20a *g)
+{
+	return 0;
+}
+static inline struct nvgpu_nvs_domain *
+nvgpu_nvs_domain_get(struct gk20a *g, const char *name)
+{
+	return NULL;
+}
+static inline void nvgpu_nvs_domain_put(struct gk20a *g, struct nvgpu_nvs_domain *dom)
+{
+}
+#endif
 
 #endif
@@ -55,6 +55,8 @@ struct nvgpu_gr_ctx;
 struct nvgpu_channel_hw_state;
 struct nvgpu_profiler_object;
 struct nvgpu_runlist;
+struct nvgpu_runlist_domain;
+struct nvgpu_nvs_domain;
 
 #ifdef CONFIG_NVGPU_CHANNEL_TSG_CONTROL
 enum nvgpu_event_id_type;
@@ -177,10 +179,15 @@ struct nvgpu_tsg {
 	struct nvgpu_runlist *runlist;
 
 	/**
-	 * Scheduling domain this TSG is bound to. Bound with an ioctl, initially the default domain.
+	 * Runlist domain this TSG is bound to. Bound with an ioctl, initially the default domain.
 	 */
 	struct nvgpu_runlist_domain *rl_domain;
 
+	/*
+	 * A TSG keeps a ref to its scheduling domain so that active domains can't be deleted.
+	 */
+	struct nvgpu_nvs_domain *nvs_domain;
+
 	/** tgid (OS specific) of the process that openend the TSG. */
 
 	/**
@@ -676,6 +676,7 @@ nvgpu_runlist_unlock_active_runlists
 nvgpu_runlist_unlock_runlists
 nvgpu_runlist_update
 nvgpu_runlist_update_locked
+nvgpu_rl_domain_alloc
 nvgpu_rwsem_init
 nvgpu_rwsem_down_read
 nvgpu_rwsem_down_write
@@ -693,6 +693,7 @@ nvgpu_runlist_unlock_active_runlists
 nvgpu_runlist_unlock_runlists
 nvgpu_runlist_update
 nvgpu_runlist_update_locked
+nvgpu_rl_domain_alloc
 nvgpu_rwsem_init
 nvgpu_rwsem_down_read
 nvgpu_rwsem_down_write
@@ -1529,6 +1529,13 @@ static int stub_fifo_preempt_channel(struct gk20a *g, struct nvgpu_channel *ch)
 	return -1;
 }
 
+static int stub_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
+		struct nvgpu_runlist_domain *domain,
+		bool add, bool wait_for_finish)
+{
+	return 0;
+}
+
 static void stub_channel_work_completion_cancel_sync(struct nvgpu_channel *ch)
 {
@@ -1560,6 +1567,7 @@ int test_channel_suspend_resume_serviceable_chs(struct unit_module *m,
 
 	g->ops.fifo.preempt_tsg = stub_fifo_preempt_tsg;
 	g->ops.fifo.preempt_channel = stub_fifo_preempt_channel;
+	g->ops.runlist.reload = stub_runlist_reload;
 	orig_ch_tsgid = ch->tsgid;
 
 	for (branches = 0U; branches < F_CHANNEL_SUSPEND_RESUME_CHS_LAST;
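The tests above swap HAL entry points for stubs through the g->ops
function-pointer table so that runlist reloads become no-ops during the
suspend/resume branches. A reduced sketch of that stubbing pattern follows
(simplified types; this is not the real gops layout):

#include <stdio.h>

struct ops {
	int (*reload)(int runlist_id);
};

static int real_reload(int runlist_id)
{
	printf("touching hw for runlist %d\n", runlist_id);
	return 0;
}

static int stub_reload(int runlist_id)
{
	(void)runlist_id; /* report success without touching hw */
	return 0;
}

int main(void)
{
	struct ops g_ops = { .reload = real_reload };

	g_ops.reload = stub_reload; /* as the test fixture does */
	return g_ops.reload(0);
}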
@@ -124,6 +124,7 @@ int test_fifo_suspend(struct unit_module *m, struct gk20a *g, void *args)
 
 	gv11b_init_hal(g);
 	gops = g->ops;
+	nvgpu_device_init(g);
 	g->ops.fifo.bar1_snooping_disable = stub_fifo_bar1_snooping_disable;
 	err = nvgpu_fifo_init_support(g);
 	unit_assert(err == 0, goto done);
@@ -175,6 +176,7 @@ int test_fifo_sw_quiesce(struct unit_module *m, struct gk20a *g, void *args)
 
 	gv11b_init_hal(g);
 	gops = g->ops;
+	nvgpu_device_init(g);
 	err = nvgpu_fifo_init_support(g);
 	unit_assert(err == 0, goto done);
@@ -206,15 +208,19 @@ done:
  * NOTE: nvgpu_engine_setup_sw() consists of 2 memory allocations.
  * Selecting branch for nvgpu_runlist_setup_sw() fail case accordingly.
  */
 #define F_FIFO_SETUP_SW_COMMON_ENGINE_FAIL2	BIT(5)
 #define F_FIFO_SETUP_SW_COMMON_RUNLIST_FAIL	BIT(6)
-#define F_FIFO_SETUP_SW_PBDMA_NULL		BIT(7)
-#define F_FIFO_CLEANUP_SW_PBDMA_NULL		BIT(8)
-#define F_FIFO_SETUP_HW_PASS			BIT(9)
-#define F_FIFO_SETUP_HW_FAIL			BIT(10)
-#define F_FIFO_INIT_LAST			BIT(11)
+/*
+ * The fifo setup too contains another allocation.
+ */
+#define F_FIFO_SETUP_SW_COMMON_RUNLIST_FAIL2	BIT(7)
+#define F_FIFO_SETUP_SW_PBDMA_NULL		BIT(8)
+#define F_FIFO_CLEANUP_SW_PBDMA_NULL		BIT(9)
+#define F_FIFO_SETUP_HW_PASS			BIT(10)
+#define F_FIFO_SETUP_HW_FAIL			BIT(11)
+#define F_FIFO_INIT_LAST			BIT(12)
 
 static const char *f_fifo_init[] = {
 	"fifo init support",
 	"fifo init sw ready",
 	"channel setup sw fail",
 	"tsg setup sw fail",
@@ -222,6 +228,7 @@ static const char *f_fifo_init[] = {
 	"engine setup sw fail",
 	"",
 	"runlist setup sw fail",
+	"runlist setup 2 sw fail",
 	"pbdma setup sw NULL",
 	"pbdma cleanup sw NULL",
 	"fifo setup hw pass",
@@ -250,8 +257,7 @@ int test_init_support(struct unit_module *m, struct gk20a *g, void *args)
 			F_FIFO_SETUP_SW_COMMON_TSG_FAIL |
 			F_FIFO_SETUP_SW_COMMON_PBDMA_FAIL |
 			F_FIFO_SETUP_SW_COMMON_ENGINE_FAIL |
-			F_FIFO_SETUP_SW_COMMON_RUNLIST_FAIL |
-			F_FIFO_CLEANUP_SW_PBDMA_NULL;
+			F_FIFO_SETUP_SW_COMMON_RUNLIST_FAIL;
 	u32 fail = F_FIFO_SETUP_HW_FAIL | alloc_fail;
 	u32 prune = F_FIFO_SETUP_SW_READY | F_FIFO_SETUP_SW_PBDMA_NULL |
 		F_FIFO_SETUP_HW_PASS | fail;
@@ -78,14 +78,17 @@ int test_gk20a_runlist_hw_submit(struct unit_module *m,
 	struct nvgpu_fifo *f = &g->fifo;
 	int ret = UNIT_FAIL;
 	u32 runlist_id = nvgpu_engine_get_gr_runlist_id(g);
+	struct nvgpu_runlist *runlist = g->fifo.runlists[runlist_id];
 	u32 count;
 
+	nvgpu_rl_domain_alloc(g, "(default)");
+
 	for (count = 0; count < 2; count++) {
 
 		nvgpu_writel(g, fifo_runlist_r(), 0);
 		nvgpu_writel(g, fifo_runlist_base_r(), 0);
 
-		f->runlists[runlist_id]->domain->mem_hw->count = count;
+		runlist->domain->mem_hw->count = count;
 
 		gk20a_runlist_hw_submit(g, f->runlists[runlist_id]);
 		if (count == 0) {