gpu: nvgpu: gsp: Create functions to pass nvs data to gsp firmware

Changes:
- Created functions to populate GSP interface data from nvs and runlist
  structures.
- Handled both user domains and shadow domains.
- Raised engine support from two engines to four (see the usage sketch
  after this list).
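
A minimal sketch of how the new entry points compose, assuming a probed
struct gk20a and an existing nvs domain ID; the mirror_domain_to_gsp wrapper
is hypothetical, while nvgpu_gsp_nvs_add_domain(), nvgpu_gps_sched_update_runlist()
and nvgpu_gsp_nvs_delete_domain() are the calls this change adds:

    #include <nvgpu/gk20a.h>
    #include <nvgpu/gsp_sched.h>
    #include <nvgpu/runlist.h>

    /* Hypothetical caller: mirror one nvs domain and its runlist to GSP. */
    static int mirror_domain_to_gsp(struct gk20a *g, u64 domain_id,
            struct nvgpu_runlist *rl)
    {
        int err;

        /* Push the domain and its per-runlist buffers to GSP firmware. */
        err = nvgpu_gsp_nvs_add_domain(g, domain_id);
        if (err != 0) {
            return err;
        }

        /* Re-send runlist data, e.g. after a submit changes it. */
        err = nvgpu_gps_sched_update_runlist(g, rl);
        if (err != 0) {
            /* Roll back the domain on failure. */
            (void)nvgpu_gsp_nvs_delete_domain(g, domain_id);
        }
        return err;
    }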

NVGPU-8531

Signed-off-by: vivekku <vivekku@nvidia.com>
Change-Id: I1d9ec9ded8a9b47a5b2a00c44dacbab22e3b04b1
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2743596
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: svc-mobile-misra <svc-mobile-misra@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author:    vivekku <vivekku@nvidia.com>
Date:      2022-07-12 07:23:16 +00:00
Committer: mobile promotions
Commit:    5bb56723be (parent 12b539aa69)

14 changed files with 386 additions and 53 deletions


@@ -337,6 +337,7 @@ gsp_sched:
   common/gsp_scheduler/ipc/gsp_msg.h,
   common/gsp_scheduler/gsp_scheduler.c,
   common/gsp_scheduler/gsp_scheduler.h,
+  common/gsp_scheduler/gsp_nvs.c,
   common/gsp_scheduler/gsp_runlist.c,
   common/gsp_scheduler/gsp_runlist.h,
   common/gsp_scheduler/gsp_ctrl_fifo.c,


@@ -444,7 +444,8 @@ nvgpu-$(CONFIG_NVGPU_GSP_SCHEDULER) += \
 	common/gsp_scheduler/ipc/gsp_msg.o \
 	common/gsp_scheduler/gsp_scheduler.o \
 	common/gsp_scheduler/gsp_runlist.o \
-	common/gsp_scheduler/gsp_ctrl_fifo.o
+	common/gsp_scheduler/gsp_ctrl_fifo.o \
+	common/gsp_scheduler/gsp_nvs.o
 endif
 ifeq ($(CONFIG_NVGPU_GSP_STRESS_TEST),y)


@@ -203,7 +203,8 @@ srcs += common/gsp/gsp_init.c \
 	common/gsp_scheduler/ipc/gsp_msg.c \
 	common/gsp_scheduler/gsp_scheduler.c \
 	common/gsp_scheduler/gsp_runlist.c \
-	common/gsp_scheduler/gsp_ctrl_fifo.c
+	common/gsp_scheduler/gsp_ctrl_fifo.c \
+	common/gsp_scheduler/gsp_nvs.c
 endif
 ifeq ($(CONFIG_NVGPU_GSP_STRESS_TEST),1)


@@ -39,7 +39,12 @@
 #include <nvgpu/pmu/mutex.h>
 #endif
 #include <nvgpu/nvgpu_init.h>
+#ifdef CONFIG_NVGPU_GSP_SCHEDULER
+#include <nvgpu/gsp_sched.h>
+#endif
+#ifdef CONFIG_NVS_PRESENT
+#include <nvgpu/nvs.h>
+#endif
 
 void nvgpu_runlist_lock_active_runlists(struct gk20a *g)
 {
 	struct nvgpu_fifo *f = &g->fifo;
@@ -1339,3 +1344,53 @@ void nvgpu_runlist_unlock_runlists(struct gk20a *g, u32 runlists_mask)
 		}
 	}
 }
+
+s32 nvgpu_runlist_get_runlist_info(struct gk20a *g, u32 rl_index, u32 *runlist_id,
+		u8 *device_id)
+{
+	struct nvgpu_fifo *f = &g->fifo;
+	s32 err = 0;
+	u32 device_id_u32 = 0;
+	struct nvgpu_runlist *runlist = &f->active_runlists[rl_index];
+
+	err = nvgpu_runlist_get_device_id(g, runlist, &device_id_u32);
+	if (err != 0) {
+		nvgpu_err(g, "error in getting device ID");
+		goto exit;
+	}
+	*device_id = nvgpu_safe_cast_u32_to_u8(device_id_u32);
+	*runlist_id = runlist->id;
+exit:
+	return err;
+}
+
+s32 nvgpu_runlist_get_device_id(struct gk20a *g, struct nvgpu_runlist *rl, u32 *device_id)
+{
+	u8 dev;
+	s32 err = 0;
+
+	for (dev = 0; dev < (u8)(RLENG_PER_RUNLIST_SIZE); dev++) {
+		u32 rl_pribase = rl->rl_dev_list[dev]->rl_pri_base;
+
+		if (rl->runlist_pri_base == rl_pribase) {
+			*device_id = rl->rl_dev_list[dev]->engine_id;
+			goto exit;
+		}
+	}
+	err = (s32)(-EINVAL);
+	nvgpu_err(g, "Get device ID failed");
+exit:
+	return err;
+}
+
+u32 nvgpu_runlist_get_num_runlists(struct gk20a *g)
+{
+	struct nvgpu_fifo *f = &g->fifo;
+
+	return f->num_runlists;
+}
+
+struct nvgpu_runlist_domain *nvgpu_runlist_get_shadow_domain(struct gk20a *g)
+{
+	return g->fifo.active_runlists[0].shadow_rl_domain;
+}
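
A minimal sketch of how these accessors compose, assuming a probed
struct gk20a; report_runlists is a hypothetical helper, the accessors are
the ones added above:

    #include <nvgpu/gk20a.h>
    #include <nvgpu/log.h>
    #include <nvgpu/runlist.h>

    /* Hypothetical walk over the active runlists via the new accessors. */
    static void report_runlists(struct gk20a *g)
    {
        u32 num = nvgpu_runlist_get_num_runlists(g);
        u32 i;

        for (i = 0U; i < num; i++) {
            u32 runlist_id;
            u8 device_id;

            if (nvgpu_runlist_get_runlist_info(g, i, &runlist_id,
                    &device_id) != 0) {
                continue;
            }
            nvgpu_log_info(g, "runlist %u -> device %u",
                    runlist_id, (u32)device_id);
        }
    }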


@@ -0,0 +1,173 @@
+#include <nvgpu/gk20a.h>
+#include <nvgpu/log.h>
+#include <nvgpu/gsp.h>
+#include <nvgpu/runlist.h>
+#include <nvgpu/string.h>
+#ifdef CONFIG_NVS_PRESENT
+#include <nvgpu/nvs.h>
+#endif
+#include <nvgpu/gsp_sched.h>
+#include <nvgpu/device.h>
+#include <nvgpu/utils.h>
+
+#include "gsp_runlist.h"
+
+static int gsp_nvs_update_runlist_info(struct gk20a *g,
+		struct nvgpu_gsp_runlist_info *gsp_runlist, struct nvgpu_runlist *rl)
+{
+	int err = 0;
+	u64 runlist_iova = nvgpu_mem_get_addr(g, &rl->domain->mem_hw->mem);
+	u32 num_entries = rl->domain->mem_hw->count;
+	u32 aperture = g->ops.runlist.get_runlist_aperture(g, &rl->domain->mem_hw->mem);
+	u32 device_id = 0;
+
+	nvgpu_gsp_dbg(g, " ");
+
+	gsp_runlist->domain_id = u64_lo32(rl->domain->domain_id);
+	gsp_runlist->runlist_id = rl->id;
+	gsp_runlist->aperture = aperture;
+	gsp_runlist->runlist_base_lo = u64_lo32(runlist_iova);
+	gsp_runlist->runlist_base_hi = u64_hi32(runlist_iova);
+	gsp_runlist->num_entries = num_entries;
+	gsp_runlist->is_runlist_valid = true;
+
+	err = nvgpu_runlist_get_device_id(g, rl, &device_id);
+	if (err != 0) {
+		nvgpu_err(g, "updating engine ID to gsp runlist info failed");
+	}
+	gsp_runlist->device_id = nvgpu_safe_cast_u32_to_u8(device_id);
+
+	return err;
+}
+
+#ifdef CONFIG_NVS_PRESENT
+static int gsp_nvs_get_runlist_info(struct gk20a *g, struct nvgpu_gsp_domain_info *gsp_domain,
+		u64 nvgpu_domain_id)
+{
+	u32 num_runlists;
+	int err = 0;
+	u64 runlist_iova = 0;
+	u32 num_entries = 0;
+	u32 aperture = 0;
+	u32 runlist_id;
+	u8 device_id;
+	u32 i;
+	struct nvgpu_gsp_runlist_info *gsp_runlist;
+
+	nvgpu_gsp_dbg(g, " ");
+
+	num_runlists = nvgpu_runlist_get_num_runlists(g);
+	for (i = 0; i < num_runlists; i++) {
+		gsp_runlist = &gsp_domain->runlist_info[i];
+		err = nvgpu_nvs_gsp_get_runlist_domain_info(g, nvgpu_domain_id, &num_entries,
+				&runlist_iova, &aperture, i);
+		if (err != 0) {
+			nvgpu_err(g, "gsp error in getting domain info ID: %u",
+					gsp_domain->domain_id);
+			continue;
+		}
+		err = nvgpu_runlist_get_runlist_info(g, i, &runlist_id, &device_id);
+		if (err != 0) {
+			nvgpu_err(g, "gsp error in getting runlist info Index: %u", i);
+			continue;
+		}
+		gsp_runlist->aperture = aperture;
+		gsp_runlist->device_id = device_id;
+		gsp_runlist->domain_id = gsp_domain->domain_id;
+		gsp_runlist->is_runlist_valid = true;
+		gsp_runlist->num_entries = num_entries;
+		gsp_runlist->runlist_base_lo = u64_lo32(runlist_iova);
+		gsp_runlist->runlist_base_hi = u64_hi32(runlist_iova);
+		gsp_runlist->runlist_id = runlist_id;
+	}
+
+	return err;
+}
+
+static int gsp_nvs_get_domain_info(struct gk20a *g, u64 nvgpu_domain_id,
+		struct nvgpu_gsp_domain_info *gsp_domain)
+{
+	int err = 0;
+	u32 domain_id;
+	u32 timeslice_ns;
+
+	nvgpu_gsp_dbg(g, " ");
+
+	err = nvgpu_nvs_get_gsp_domain_info(g, nvgpu_domain_id,
+			&domain_id, &timeslice_ns);
+	if (err != 0) {
+		nvgpu_err(g, "getting nvs domain info failed");
+		goto exit;
+	}
+
+	gsp_domain->domain_id = domain_id;
+	gsp_domain->priority = 0;
+	gsp_domain->time_slicing = timeslice_ns;
+
+	err = gsp_nvs_get_runlist_info(g, gsp_domain, nvgpu_domain_id);
+	if (err != 0) {
+		nvgpu_err(g, "copy of gsp runlist info failed");
+		goto exit;
+	}
+exit:
+	return err;
+}
+#endif
+
+#ifdef CONFIG_NVS_PRESENT
+/* Add nvs domain info to the gsp domain info containers. */
+int nvgpu_gsp_nvs_add_domain(struct gk20a *g, u64 nvgpu_domain_id)
+{
+	struct nvgpu_gsp_domain_info gsp_domain = { };
+	int err = 0;
+
+	nvgpu_gsp_dbg(g, " ");
+
+	err = gsp_nvs_get_domain_info(g, nvgpu_domain_id, &gsp_domain);
+	if (err != 0) {
+		nvgpu_err(g, "gsp domain data copy to cmd buffer failed");
+		goto exit;
+	}
+
+	err = nvgpu_gsp_sched_domain_add(g, &gsp_domain);
+	if (err != 0) {
+		nvgpu_err(g, "gsp add domain failed");
+		goto exit;
+	}
+exit:
+	return err;
+}
+#endif
+
+/* Request deletion of the domain identified by nvgpu_domain_id. */
+int nvgpu_gsp_nvs_delete_domain(struct gk20a *g, u64 nvgpu_domain_id)
+{
+	int err = 0;
+
+	nvgpu_gsp_dbg(g, " ");
+
+	err = nvgpu_gsp_sched_domain_delete(g, u64_lo32(nvgpu_domain_id));
+	if (err != 0) {
+		nvgpu_err(g, "domain delete failed");
+	}
+
+	return err;
+}
+
+/* Update the gsp runlist domain. */
+int nvgpu_gps_sched_update_runlist(struct gk20a *g, struct nvgpu_runlist *rl)
+{
+	struct nvgpu_gsp_runlist_info gsp_runlist = { };
+	int err = 0;
+
+	nvgpu_gsp_dbg(g, " ");
+
+	/* copy runlist data to cmd buffer */
+	err = gsp_nvs_update_runlist_info(g, &gsp_runlist, rl);
+	if (err != 0) {
+		nvgpu_err(g, "gsp runlist update to cmd failed");
+		goto exit;
+	}
+
+	err = nvgpu_gsp_sched_runlist_update(g, &gsp_runlist);
+	if (err != 0) {
+		nvgpu_err(g, "sending runlist command buffer failed");
+		goto exit;
+	}
+exit:
+	return err;
+}
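
The shadow domain goes through the same entry point; a minimal sketch, where
mirror_shadow_domain is a hypothetical caller and SHADOW_DOMAIN_ID is the
identifier the nvs code in this change resolves to the scheduler's shadow
domain:

    /* Sketch: nvs resolves SHADOW_DOMAIN_ID to the shadow domain,
     * so no user domain lookup takes place. */
    static int mirror_shadow_domain(struct gk20a *g)
    {
        return nvgpu_gsp_nvs_add_domain(g, (u64)SHADOW_DOMAIN_ID);
    }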


@@ -147,12 +147,11 @@ static int gsp_get_async_ce(struct gk20a *g, struct nvgpu_device *device,
 	return 0;
 }
 
-static void gsp_get_device_info(struct gk20a *g, u8 device_id,
-		struct nvgpu_gsp_device_info *dev_info,
+static void gsp_get_device_info(struct gk20a *g, struct nvgpu_gsp_device_info *dev_info,
 		const struct nvgpu_device *device)
 {
 	/* copy domain info into cmd buffer */
-	dev_info->device_id = device_id;
+	dev_info->device_id = nvgpu_safe_cast_u32_to_u8(device->engine_id);
 	dev_info->is_engine = true;
 	dev_info->engine_type = device->type;
 	dev_info->engine_id = device->engine_id;
@@ -165,7 +164,7 @@ static void gsp_get_device_info(struct gk20a *g, u8 device_id,
 }
 
 static int gsp_sched_send_devices_info(struct gk20a *g,
-		u8 device_id, const struct nvgpu_device *device)
+		const struct nvgpu_device *device)
 {
 	struct nv_flcn_cmd_gsp cmd = { };
 	int err = 0;
@@ -173,7 +172,7 @@ static int gsp_sched_send_devices_info(struct gk20a *g,
 	nvgpu_gsp_dbg(g, " ");
 
 	/* copy domain info into cmd buffer */
-	gsp_get_device_info(g, device_id, &cmd.cmd.device, device);
+	gsp_get_device_info(g, &cmd.cmd.device, device);
 
 	err = gsp_send_cmd_and_wait_for_ack(g, &cmd,
 		NV_GSP_UNIT_DEVICES_INFO, sizeof(struct nvgpu_gsp_device_info));
@@ -185,37 +184,33 @@ int nvgpu_gsp_sched_send_devices_info(struct gk20a *g)
 {
 	const struct nvgpu_device *gr_dev = NULL;
 	struct nvgpu_device ce_dev = { };
-	u8 instance = 0;
 	int err = 0;
+	u8 engine_instance = 0;
 
-	/*
-	 * Only GR0 is supported
-	 */
-	gr_dev = nvgpu_device_get(g, NVGPU_DEVTYPE_GRAPHICS, instance);
-	if (gr_dev == NULL) {
-		nvgpu_err(g, "Get GR0 device info failed");
-		goto exit;
-	}
-	err = gsp_sched_send_devices_info(g,
-			GSP_SCHED_GR0_DEVICE_ID, gr_dev);
-	if (err != 0) {
-		nvgpu_err(g, "send GR0 device info failed");
-		goto exit;
-	}
-	/*
-	 * Only Async CE0 is supported
-	 */
-	err = gsp_get_async_ce(g, &ce_dev, instance);
-	if (err != 0) {
-		nvgpu_err(g, "Get Async CE0 device info failed");
-		goto exit;
-	}
-	err = gsp_sched_send_devices_info(g,
-			GSP_SCHED_ASYNC_CE0_DEVICE_ID, &ce_dev);
-	if (err != 0) {
-		nvgpu_err(g, "send Async CE0 device info failed");
-		goto exit;
-	}
+	for (engine_instance = 0; engine_instance < GSP_SCHED_ENGINE_INSTANCE; engine_instance++) {
+		/* handle the GR engine */
+		gr_dev = nvgpu_device_get(g, NVGPU_DEVTYPE_GRAPHICS, engine_instance);
+		if (gr_dev == NULL) {
+			err = -ENXIO;
+			nvgpu_err(g, "Get GR device info failed ID: %d", engine_instance);
+			goto exit;
+		}
+		err = gsp_sched_send_devices_info(g, gr_dev);
+		if (err != 0) {
+			nvgpu_err(g, "Sending GR engine info failed ID: %d", engine_instance);
+			goto exit;
+		}
+		/* handle the async CE engine */
+		err = gsp_get_async_ce(g, &ce_dev, engine_instance);
+		if (err != 0) {
+			nvgpu_err(g, "Getting Async engine failed ID: %d", engine_instance);
+			goto exit;
+		}
+		err = gsp_sched_send_devices_info(g, &ce_dev);
+		if (err != 0) {
+			nvgpu_err(g, "Sending Async engine info failed ID: %d", engine_instance);
+			goto exit;
+		}
+	}
 exit:
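
With GSP_SCHED_ENGINE_INSTANCE defined as 2U (see gsp_runlist.h below), each
iteration of this loop registers one GR device and one async CE device, so
four engines are reported to GSP in total — the "four engines from two" item
from the commit message.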


@@ -23,9 +23,7 @@
 #ifndef NVGPU_GSP_RUNLIST
 #define NVGPU_GSP_RUNLIST
 
-#define GSP_SCHED_GR0_DEVICE_ID 0U
-#define GSP_SCHED_ASYNC_CE0_DEVICE_ID 1U
+#define GSP_SCHED_ENGINE_INSTANCE 2U
 
 struct nv_flcn_cmd_gsp;
 struct gk20a;
 
 struct nvgpu_gsp_device_info {


@@ -30,6 +30,9 @@
 #include <nvgpu/runlist.h>
 #include <nvgpu/kref.h>
+#ifdef CONFIG_NVGPU_GSP_SCHEDULER
+#include <nvgpu/gsp_sched.h>
+#endif
 
 static struct nvs_sched_ops nvgpu_nvs_ops = {
 	.preempt = NULL,
 	.recover = NULL,
@@ -61,10 +64,6 @@ struct nvgpu_nvs_worker_item {
 	nvgpu_atomic_t state;
 };
 
-static struct nvgpu_nvs_domain *
-nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id);
-
 static inline struct nvgpu_nvs_worker_item *
 nvgpu_nvs_worker_item_from_worker_item(struct nvgpu_list_node *node)
 {
@@ -836,7 +835,7 @@ unlock:
 	return err;
 }
 
-static struct nvgpu_nvs_domain *
+struct nvgpu_nvs_domain *
 nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id)
 {
 	struct nvgpu_nvs_scheduler *sched = g->scheduler;
@@ -855,6 +854,12 @@ nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id)
 	return NULL;
 }
 
+struct nvgpu_nvs_domain *
+nvgpu_nvs_get_shadow_domain_locked(struct gk20a *g)
+{
+	return g->scheduler->shadow_domain;
+}
+
 struct nvgpu_nvs_domain *
 nvgpu_nvs_domain_by_id(struct gk20a *g, u64 domain_id)
 {
@@ -1037,3 +1042,69 @@ void nvgpu_nvs_print_domain(struct gk20a *g, struct nvgpu_nvs_domain *domain)
 	nvs_dbg(g, " preempt grace: %llu ns", nvs_dom->preempt_grace_ns);
 	nvs_dbg(g, " domain ID: %llu", domain->id);
 }
+
+#ifdef CONFIG_NVGPU_GSP_SCHEDULER
+s32 nvgpu_nvs_gsp_get_runlist_domain_info(struct gk20a *g, u64 nvgpu_domain_id,
+		u32 *num_entries, u64 *runlist_iova, u32 *aperture, u32 index)
+{
+	struct nvgpu_runlist_domain *domain;
+	struct nvgpu_nvs_domain *nvgpu_domain;
+	s32 err = 0;
+
+	if (nvgpu_domain_id == (u64)(SHADOW_DOMAIN_ID)) {
+		nvgpu_domain = nvgpu_nvs_get_shadow_domain_locked(g);
+		if (nvgpu_domain == NULL) {
+			nvgpu_err(g, "gsp nvgpu_domain is NULL");
+			err = -ENXIO;
+			goto exit;
+		}
+		domain = nvgpu_runlist_get_shadow_domain(g);
+	} else {
+		nvgpu_domain = nvgpu_nvs_domain_by_id_locked(g, nvgpu_domain_id);
+		if (nvgpu_domain == NULL) {
+			nvgpu_err(g, "gsp nvgpu_domain is NULL");
+			err = -ENXIO;
+			goto exit;
+		}
+		domain = nvgpu_domain->rl_domains[index];
+	}
+
+	if (domain == NULL) {
+		nvgpu_err(g, "gsp runlist domain is NULL");
+		err = -ENXIO;
+		goto exit;
+	}
+
+	*runlist_iova = nvgpu_mem_get_addr(g, &domain->mem_hw->mem);
+	*aperture = g->ops.runlist.get_runlist_aperture(g, &domain->mem_hw->mem);
+	*num_entries = domain->mem_hw->count;
+exit:
+	return err;
+}
+
+s32 nvgpu_nvs_get_gsp_domain_info(struct gk20a *g, u64 nvgpu_domain_id,
+		u32 *domain_id, u32 *timeslice_ns)
+{
+	struct nvgpu_nvs_domain *nvgpu_domain;
+	s32 err = 0;
+
+	if (nvgpu_domain_id == SHADOW_DOMAIN_ID) {
+		nvgpu_domain = nvgpu_nvs_get_shadow_domain_locked(g);
+	} else {
+		nvgpu_domain = nvgpu_nvs_domain_by_id_locked(g, nvgpu_domain_id);
+	}
+	if (nvgpu_domain == NULL) {
+		nvgpu_err(g, "gsp nvgpu_domain is NULL");
+		err = -ENXIO;
+		goto exit;
+	}
+
+	*domain_id = u64_lo32(nvgpu_domain->id);
+	*timeslice_ns = nvgpu_safe_cast_u64_to_u32(
+			nvgpu_domain->parent->timeslice_ns);
+exit:
+	return err;
+}
+#endif
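
Both lookups carry the _locked suffix, i.e. the caller is expected to hold
the scheduler lock. A sketch of a guarded lookup, in which the g->sched_mutex
name is an assumption about the surrounding nvs code rather than part of this
change:

    /* Sketch: take the scheduler lock around the _locked lookup. */
    static struct nvgpu_nvs_domain *lookup_domain(struct gk20a *g, u64 id)
    {
        struct nvgpu_nvs_domain *dom;

        nvgpu_mutex_acquire(&g->sched_mutex);
        dom = nvgpu_nvs_domain_by_id_locked(g, id);
        nvgpu_mutex_release(&g->sched_mutex);
        return dom;
    }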


@@ -36,7 +36,7 @@ void ga10b_runlist_hw_submit(struct gk20a *g, struct nvgpu_runlist *runlist);
 int ga10b_runlist_check_pending(struct gk20a *g, struct nvgpu_runlist *runlist);
 void ga10b_runlist_write_state(struct gk20a *g, u32 runlists_mask,
 		u32 runlist_state);
-u32 ga10b_get_runlist_aperture(struct gk20a *g, struct nvgpu_runlist *runlist);
+u32 ga10b_get_runlist_aperture(struct gk20a *g, struct nvgpu_mem *mem);
 #ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
 int ga10b_fifo_reschedule_preempt_next(struct nvgpu_channel *ch,
 		bool wait_preempt);


@@ -93,9 +93,9 @@ int ga10b_runlist_check_pending(struct gk20a *g, struct nvgpu_runlist *runlist)
 	return ret;
 }
 
-u32 ga10b_get_runlist_aperture(struct gk20a *g, struct nvgpu_runlist *runlist)
+u32 ga10b_get_runlist_aperture(struct gk20a *g, struct nvgpu_mem *mem)
 {
-	return nvgpu_aperture_mask(g, &runlist->domain->mem_hw->mem,
+	return nvgpu_aperture_mask(g, mem,
 		runlist_submit_base_lo_target_sys_mem_noncoherent_f(),
 		runlist_submit_base_lo_target_sys_mem_coherent_f(),
 		runlist_submit_base_lo_target_vid_mem_f());
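
Taking a struct nvgpu_mem instead of the runlist decouples the aperture query
from the currently submitted buffer, which is what lets gsp_nvs.c resolve
apertures for arbitrary runlist domains. A sketch of the updated call
pattern, with domain_aperture as a hypothetical helper:

    /* Sketch: query the aperture of any runlist domain buffer. */
    static u32 domain_aperture(struct gk20a *g,
            struct nvgpu_runlist_domain *domain)
    {
        return g->ops.runlist.get_runlist_aperture(g, &domain->mem_hw->mem);
    }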


@@ -102,7 +102,7 @@ struct gops_runlist {
 	void (*init_enginfo)(struct gk20a *g, struct nvgpu_fifo *f);
 	u32 (*get_tsg_max_timeslice)(void);
 	u32 (*get_runlist_id)(struct gk20a *g, u32 runlist_pri_base);
-	u32 (*get_runlist_aperture)(struct gk20a *g, struct nvgpu_runlist *runlist);
+	u32 (*get_runlist_aperture)(struct gk20a *g, struct nvgpu_mem *mem);
 	u32 (*get_engine_id_from_rleng_id)(struct gk20a *g,
 		u32 rleng_id, u32 runlist_pri_base);
 	u32 (*get_chram_bar0_offset)(struct gk20a *g, u32 runlist_pri_base);


@@ -24,13 +24,13 @@
 #define GSP_SCHED_H
 
 struct gk20a;
 struct nvgpu_gsp_sched;
+struct nvgpu_runlist;
 
 /*
  * Scheduler shall support only two engines with two runlists per domain.
  * 1. GR0
  * 2. Async CE0
  */
-#define TOTAL_NO_OF_RUNLISTS 2U
+#define TOTAL_NO_OF_RUNLISTS 4U
 
 struct nvgpu_gsp_runlist_info {
 	/*
@@ -117,4 +117,9 @@ int nvgpu_gsp_sched_query_active_domain(struct gk20a *g, u32 *active_domain);
 int nvgpu_gsp_sched_query_no_of_domains(struct gk20a *g, u32 *no_of_domains);
 int nvgpu_gsp_sched_start(struct gk20a *g);
 int nvgpu_gsp_sched_stop(struct gk20a *g);
+/* Functions to pass nvs scheduler and runlist domain info to GSP */
+int nvgpu_gsp_nvs_add_domain(struct gk20a *g, u64 nvgpu_domain_id);
+int nvgpu_gsp_nvs_delete_domain(struct gk20a *g, u64 nvgpu_domain_id);
+int nvgpu_gsp_nvs_update_runlist(struct gk20a *g, const char *name, struct nvgpu_runlist *rl);
+int nvgpu_gps_sched_update_runlist(struct gk20a *g, struct nvgpu_runlist *rl);
 #endif /* GSP_SCHED_H */


@@ -48,6 +48,7 @@ struct nvgpu_runlist;
 struct nvgpu_runlist_domain;
 struct nvgpu_nvs_ctrl_queue;
 struct nvgpu_nvs_domain_ctrl_fifo;
+struct nvgpu_nvs_domain;
 
 struct nvs_domain_ctrl_fifo_capabilities {
 	/* Store type of scheduler backend */
@@ -296,6 +297,9 @@ bool nvgpu_nvs_ctrl_fifo_user_is_subscribed_to_queue(struct nvs_domain_ctrl_fifo
 		struct nvgpu_nvs_ctrl_queue *queue);
 void nvgpu_nvs_ctrl_fifo_erase_queue(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue);
 void nvgpu_nvs_ctrl_fifo_erase_all_queues(struct gk20a *g);
+struct nvgpu_nvs_domain *
+nvgpu_nvs_get_shadow_domain_locked(struct gk20a *g);
+struct nvgpu_nvs_domain *nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id);
 #else
@@ -340,6 +344,24 @@ static inline const char *nvgpu_nvs_domain_get_name(struct nvgpu_nvs_domain *dom
 	(void)dom;
 	return NULL;
 }
+
+static inline struct nvgpu_nvs_domain *
+nvgpu_nvs_get_shadow_domain_locked(struct gk20a *g)
+{
+	(void)g;
+	return NULL;
+}
+
+static inline struct nvgpu_nvs_domain *nvgpu_nvs_domain_by_id_locked(struct gk20a *g, u64 domain_id)
+{
+	(void)g;
+	(void)domain_id;
+	return NULL;
+}
 #endif
+
+#ifdef CONFIG_NVGPU_GSP_SCHEDULER
+s32 nvgpu_nvs_gsp_get_runlist_domain_info(struct gk20a *g, u64 nvgpu_domain_id, u32 *num_entries,
+		u64 *runlist_iova, u32 *aperture, u32 index);
+s32 nvgpu_nvs_get_gsp_domain_info(struct gk20a *g, u64 nvgpu_domain_id,
+		u32 *domain_id, u32 *timeslice_ns);
+#endif
 #endif


@@ -39,7 +39,12 @@ struct nvgpu_tsg;
 struct nvgpu_fifo;
 struct nvgpu_channel;
 struct nvgpu_device;
+#ifdef CONFIG_NVGPU_GSP_SCHEDULER
+struct nvgpu_gsp_domain_info;
+#endif
+#ifdef CONFIG_NVS_PRESENT
+struct nvgpu_nvs_domain;
+#endif
 
 /** @cond DOXYGEN_SHOULD_SKIP_THIS */
 struct nvgpu_pbdma_info;
@@ -510,4 +515,10 @@ void nvgpu_runlist_init_enginfo(struct gk20a *g, struct nvgpu_fifo *f);
 #define rl_dbg(g, fmt, arg...) \
 	nvgpu_log(g, gpu_dbg_runlists, "RL | " fmt, ##arg)
+
+/* Functions to get runlist info for GSP */
+s32 nvgpu_runlist_get_device_id(struct gk20a *g, struct nvgpu_runlist *rl, u32 *device_id);
+s32 nvgpu_runlist_get_runlist_info(struct gk20a *g, u32 rl_index, u32 *runlist_id,
+		u8 *device_id);
+u32 nvgpu_runlist_get_num_runlists(struct gk20a *g);
+struct nvgpu_runlist_domain *nvgpu_runlist_get_shadow_domain(struct gk20a *g);
 #endif /* NVGPU_RUNLIST_H */