Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 09:12:24 +03:00)
gpu: nvgpu: gsp: call runlist update and send ctrl fifo info
Changes:
- function calls to add and delete domains
- updating runlist
- integrating control fifo changes with ioctls to send queue info to GSP FW

Bug 3884011

Change-Id: I5ad29eb9501cc2df66843c074ee6a00aae91af23
Signed-off-by: vivekku <vivekku@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2826482
Reviewed-by: Ramesh Mylavarapu <rmylavarapu@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
commit 35960f8f40
parent 6b2c080f8f
committed by mobile promotions
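Across the hunks below, the same guard pattern is applied before any work is handed to GSP firmware: the GSP scheduler must be compiled in, its feature flag enabled, the firmware reported ready, and (for runlist updates) at least one user domain present. A minimal sketch of that pattern, condensed from the runlist hunks in this commit; it is not a drop-in replacement for any individual hunk:

	/* Sketch only: condensed from the runlist update hunks below. */
#if defined(CONFIG_NVGPU_GSP_SCHEDULER) && defined(CONFIG_NVS_PRESENT)
	if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED)) &&
			nvgpu_gsp_is_ready(g) &&
			nvgpu_nvs_gsp_usr_domain_present(g)) {
		/* hand the rebuilt runlist domain to GSP firmware */
		ret = nvgpu_gps_sched_update_runlist(g, domain, rl);
	}
#endif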
@@ -555,6 +555,14 @@ int nvgpu_runlist_update_locked(struct gk20a *g, struct nvgpu_runlist *rl,
	}
#ifdef CONFIG_NVS_ROUND_ROBIN_SCHEDULER_DISABLE
	nvgpu_runlist_swap_mem(g, rl->shadow_rl_domain);
#if defined(CONFIG_NVGPU_GSP_SCHEDULER) && defined(CONFIG_NVS_PRESENT)
	if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
		if ((nvgpu_gsp_is_ready(g) == true)
				&& (nvgpu_nvs_gsp_usr_domain_present(g) == true)) {
			ret = nvgpu_gps_sched_update_runlist(g, rl->shadow_rl_domain, rl);
		}
	}
#endif
#endif
}

@@ -565,6 +573,15 @@ int nvgpu_runlist_update_locked(struct gk20a *g, struct nvgpu_runlist *rl,
	}

	nvgpu_runlist_swap_mem(g, domain);
	/* runlist update */
#if defined(CONFIG_NVGPU_GSP_SCHEDULER) && defined(CONFIG_NVS_PRESENT)
	if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
		if ((nvgpu_gsp_is_ready(g) == true)
				&& (nvgpu_nvs_gsp_usr_domain_present(g) == true)) {
			ret = nvgpu_gps_sched_update_runlist(g, domain, rl);
		}
	}
#endif

	return ret;
}
@@ -760,7 +777,10 @@ int nvgpu_rl_domain_sync_submit(struct gk20a *g, struct nvgpu_runlist *runlist,
		struct nvgpu_runlist_domain *next_domain, bool wait_for_finish)
{
	int err = 0;

#ifdef CONFIG_NVS_PRESENT
	if (nvgpu_nvs_gsp_usr_domain_present(g) == false) {
#endif
	/* schedule runlists only for shadow domains */
	if (next_domain == NULL) {
		next_domain = runlist->shadow_rl_domain;
	}
@@ -774,6 +794,9 @@ int nvgpu_rl_domain_sync_submit(struct gk20a *g, struct nvgpu_runlist *runlist,
			return err;
		}
	}
#ifdef CONFIG_NVS_PRESENT
	}
#endif

	return err;
}

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -24,7 +24,6 @@
#include <nvgpu/log.h>
#include <nvgpu/gsp.h>
#include <nvgpu/gsp_sched.h>
#include <nvgpu/nvs.h>

#include "gsp_runlist.h"
#include "gsp_scheduler.h"
@@ -33,48 +32,27 @@

#ifdef CONFIG_NVS_PRESENT
static int gsp_ctrl_fifo_get_queue_info(struct gk20a *g,
		struct nvgpu_gsp_ctrl_fifo_info *ctrl_fifo, enum queue_type qtype)
		struct nvgpu_gsp_ctrl_fifo_info *ctrl_fifo, struct nvgpu_nvs_ctrl_queue *queue,
		enum nvgpu_nvs_ctrl_queue_direction queue_direction)
{
	int err = 0;
	u8 mask;
	enum nvgpu_nvs_ctrl_queue_num queue_num;
	enum nvgpu_nvs_ctrl_queue_direction queue_direction;
	struct nvgpu_nvs_ctrl_queue *queue;
	enum queue_type qtype;

	nvgpu_gsp_dbg(g, " ");

	switch (qtype) {
	case CONTROL_QUEUE:
		mask = NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_WRITE;
		queue_num = NVGPU_NVS_NUM_CONTROL;
		queue_direction = NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER;
		break;
	case RESPONSE_QUEUE:
		mask = NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_READ;
		queue_num = NVGPU_NVS_NUM_CONTROL;
		queue_direction = NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT;
		break;
	default:
		nvgpu_err(g, "queue type invalid");
	if (queue_direction == NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER) {
		qtype = CONTROL_QUEUE;
	} else if (queue_direction == NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT) {
		qtype = RESPONSE_QUEUE;
	} else {
		nvgpu_err(g, "GSP queue type not supported");
		err = -EINVAL;
		goto exit;
	}

	/* below functions will be removed/changed once UMD support is there. */
	queue = nvgpu_nvs_ctrl_fifo_get_queue(g->sched_ctrl_fifo, queue_num,
			queue_direction, &mask);
	if (queue == NULL) {
		nvgpu_err(g, "queue allocation failed");
		nvgpu_err(g, "GSP ctrlfifo queue is null");
		err = -EFAULT;
		goto exit;
	}
	/* below functions will be removed/changed once UMD support is there. */
	err = nvgpu_nvs_buffer_alloc(g->sched_ctrl_fifo, NVS_QUEUE_DEFAULT_SIZE,
			mask, queue);
	if (err != 0) {
		nvgpu_err(g, "gsp buffer allocation failed");
		goto exit;
	}
	ctrl_fifo->fifo_addr_lo = u64_lo32(queue->mem.gpu_va);
	ctrl_fifo->fifo_addr_hi = u64_hi32(queue->mem.gpu_va);
	ctrl_fifo->queue_size = GSP_CTRL_FIFO_QUEUE_SIZE;
@@ -87,27 +65,28 @@ exit:
}

/* get and send the control fifo info to gsp */
int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, enum queue_type qtype)
int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue,
		enum nvgpu_nvs_ctrl_queue_direction queue_direction)
{
	int err = 0;
	struct nv_flcn_cmd_gsp cmd = { };
	struct nvgpu_gsp_ctrl_fifo_info ctrl_fifo = {};

	nvgpu_gsp_dbg(g, " ");

	/* below function will be removed/changed once UMD support is there. */
	err = gsp_ctrl_fifo_get_queue_info(g, &ctrl_fifo, qtype);
	/* getting the queue info */
	err = gsp_ctrl_fifo_get_queue_info(g, &ctrl_fifo, queue, queue_direction);
	if (err != 0) {
		nvgpu_err(g, "getting fifo queue info failed");
		goto exit;
	}

	/* updating the command with control fifo info */
	cmd.cmd.ctrl_fifo.fifo_addr_lo = ctrl_fifo.fifo_addr_lo;
	cmd.cmd.ctrl_fifo.fifo_addr_hi = ctrl_fifo.fifo_addr_hi;
	cmd.cmd.ctrl_fifo.queue_size = ctrl_fifo.queue_size;
	cmd.cmd.ctrl_fifo.qtype = ctrl_fifo.qtype;
	cmd.cmd.ctrl_fifo.queue_entries = ctrl_fifo.queue_entries;

	/* sending control fifo info to GSP */
	err = gsp_send_cmd_and_wait_for_ack(g, &cmd, NV_GSP_UNIT_CONTROL_INFO_SEND,
			sizeof(struct nvgpu_gsp_ctrl_fifo_info));
	if (err != 0) {
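The new nvgpu_gsp_sched_send_queue_info() entry point takes an already-created control-fifo queue plus its direction, resolves the queue type and backing buffer, and forwards the GPU VA, size and entry count to GSP. A hedged sketch of a caller, using only identifiers visible in this diff; the wrapper below is illustrative, the actual caller being nvgpu_nvs_ctrl_fifo_create_queue() in the ioctl hunk near the end of this commit:

	/* Illustrative wrapper only, not part of the commit. */
	static int send_ctrl_fifo_to_gsp(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue)
	{
		int err = 0;

		if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED)) &&
				nvgpu_gsp_is_ready(g)) {
			/* client-to-scheduler direction maps to CONTROL_QUEUE inside the helper */
			err = nvgpu_gsp_sched_send_queue_info(g, queue,
					NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER);
		}
		return err;
	}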
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -57,5 +57,4 @@ struct nvgpu_gsp_ctrl_fifo_info {
 */
	u32 qtype;
};
int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, enum queue_type qtype);
#endif/* GSP_CTRL_FIFO_H */
@@ -246,3 +246,8 @@ int nvgpu_gsp_sched_bind_ctx_reg(struct gk20a *g)

	return err;
}

bool nvgpu_gsp_is_ready(struct gk20a *g)
{
	return g->gsp_sched->gsp_ready;
}
@@ -113,7 +113,6 @@ int nvgpu_gsp_cmd_post(struct gk20a *g, struct nv_flcn_cmd_gsp *cmd,
		err = -EINVAL;
		goto exit;
	}

	/* Sanity check the command input. */
	if (!gsp_validate_cmd(gsp_sched, cmd, queue_id)) {
		err = -EINVAL;

@@ -1017,7 +1017,6 @@ int nvgpu_finalize_poweron(struct gk20a *g)
		NVGPU_INIT_TABLE_ENTRY(g->ops.pmu.pmu_restore_golden_img_state,
			NO_FLAG),
#endif
		NVGPU_INIT_TABLE_ENTRY(nvgpu_nvs_init, NO_FLAG),
		NVGPU_INIT_TABLE_ENTRY(g->ops.channel.resume_all_serviceable_ch,
			NO_FLAG),
#ifdef CONFIG_NVGPU_GSP_SCHEDULER
@@ -1031,6 +1030,7 @@ int nvgpu_finalize_poweron(struct gk20a *g)
			NVGPU_SUPPORT_GSP_STEST),
#endif
#endif
		NVGPU_INIT_TABLE_ENTRY(nvgpu_nvs_init, NO_FLAG),
	};
	size_t i;

@@ -615,6 +615,15 @@ static int nvgpu_nvs_gen_shadow_domain(struct gk20a *g)
	/* Set active_domain to shadow_domain during Init */
	g->scheduler->active_domain = g->scheduler->shadow_domain;

#ifdef CONFIG_NVGPU_GSP_SCHEDULER
	if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
		err = nvgpu_gsp_nvs_add_domain(g, nvgpu_dom->id);
		if (err != 0) {
			nvgpu_err(g, "add domain for shadow domain failed");
		}
	}
#endif

error:
	return err;
}
@@ -623,6 +632,7 @@ static void nvgpu_nvs_remove_shadow_domain(struct gk20a *g)
{
	struct nvgpu_nvs_scheduler *sched = g->scheduler;
	struct nvs_domain *nvs_dom;
	s32 err = 0;

	if (sched == NULL) {
		/* never powered on to init anything */
@@ -637,9 +647,11 @@ static void nvgpu_nvs_remove_shadow_domain(struct gk20a *g)
		nvgpu_warn(g,
			"domain %llu is still in use during shutdown! refs: %u",
			sched->shadow_domain->id, sched->shadow_domain->ref);
		nvgpu_err(g, "%u", err);
	}

	nvs_dom = sched->shadow_domain->parent;

	nvs_domain_destroy(sched->sched, nvs_dom);

	nvgpu_kfree(g, sched->shadow_domain->rl_domains);
@@ -713,6 +725,14 @@ int nvgpu_nvs_open(struct gk20a *g)
	if (nvgpu_is_enabled(g, NVGPU_SUPPORT_KMD_SCHEDULING_WORKER_THREAD)) {
		nvgpu_nvs_worker_resume(g);
	}
#endif
#ifdef CONFIG_NVGPU_GSP_SCHEDULER
	if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
		err = nvgpu_gsp_nvs_add_domain(g, U64_MAX);
		if (err != 0) {
			nvgpu_err(g, "add domain for shadow domain failed");
		}
	}
#endif
	return err;
}
@@ -781,6 +801,17 @@ unlock:
	}
#endif

#ifdef CONFIG_NVGPU_GSP_SCHEDULER
	if (err != 0) {
		if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
			err = nvgpu_gsp_nvs_delete_domain(g, g->scheduler->shadow_domain->id);
			if (err != 0) {
				nvgpu_err(g, "delete domain for shadow domain failed");
			}
		}
	}
#endif

	nvgpu_mutex_release(&g->sched_mutex);

	return err;
@@ -876,6 +907,16 @@ int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u64 timeslice,

	nvgpu_dom->parent = nvs_dom;

#ifdef CONFIG_NVGPU_GSP_SCHEDULER
	if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
		err = nvgpu_gsp_nvs_add_domain(g, nvgpu_dom->id);
		if (err != 0) {
			nvgpu_err(g, "sending domain info to gsp failed");
			goto unlock;
		}
	}
#endif

	*pdomain = nvgpu_dom;
unlock:
	nvgpu_mutex_release(&g->sched_mutex);
@@ -1036,6 +1077,14 @@ int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id)

	nvs_dom = nvgpu_dom->parent;

#ifdef CONFIG_NVGPU_GSP_SCHEDULER
	if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
		err = nvgpu_gsp_nvs_delete_domain(g, dom_id);
		if (err != 0) {
			nvgpu_err(g, "failed to delete domain");
		}
	}
#endif
	nvgpu_nvs_unlink_rl_domains(g, nvgpu_dom);
	nvgpu_nvs_delete_rl_domain_mem(g, nvgpu_dom);
	nvgpu_dom->ref = 0U;
@@ -1180,3 +1229,16 @@ exit:
	return err;
}
#endif

#ifdef CONFIG_NVS_PRESENT
bool nvgpu_nvs_gsp_usr_domain_present(struct gk20a *g)
{
	bool ret = false;

	if (nvs_domain_count(g->scheduler->sched) > 0U) {
		/* a count greater than 0 means user domains are present */
		ret = true;
	}
	return ret;
}
#endif
@@ -22,6 +22,9 @@

#ifndef GSP_SCHED_H
#define GSP_SCHED_H

#include <nvgpu/nvs.h>

struct gk20a;
struct nvgpu_gsp_sched;
struct nvgpu_runlist;
@@ -124,4 +127,9 @@ int nvgpu_gsp_nvs_update_runlist(struct gk20a *g, const char *name,struct nvgpu_
int nvgpu_gps_sched_update_runlist(struct gk20a *g,
		struct nvgpu_runlist_domain *domain, struct nvgpu_runlist *rl);
int nvgpu_gsp_sched_bind_ctx_reg(struct gk20a *g);
bool nvgpu_gsp_is_ready(struct gk20a *g);
#ifdef CONFIG_NVS_PRESENT
int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue,
		enum nvgpu_nvs_ctrl_queue_direction queue_direction);
#endif
#endif /* GSP_SCHED_H */

@@ -929,4 +929,9 @@ s32 nvgpu_nvs_gsp_get_runlist_domain_info(struct gk20a *g, u64 nvgpu_domain_id,
s32 nvgpu_nvs_get_gsp_domain_info(struct gk20a *g, u64 nvgpu_domain_id,
		u32 *domain_id, u32 *timeslice_ns);
#endif
#ifdef CONFIG_NVS_PRESENT
/* Function to check whether user domains are present or only the shadow domain
 * exists; domain_list::nr == 0 means only the shadow domain is present. */
bool nvgpu_nvs_gsp_usr_domain_present(struct gk20a *g);
#endif
#endif

@@ -28,6 +28,10 @@
#include <nvs/nvs_sched.h>
#include <nvs/domain.h>

#if defined (CONFIG_NVS_PRESENT) && defined (CONFIG_NVGPU_GSP_SCHEDULER)
#include <nvgpu/gsp_sched.h>
#endif

#include "ioctl.h"
#include "dmabuf_nvs.h"

@@ -905,6 +909,19 @@ static int nvgpu_nvs_ctrl_fifo_create_queue(struct gk20a *g,
		err = nvgpu_nvs_get_buf(g, queue, read_only);
	}

	/*
	 * sending control fifo info to GSP scheduler
	 * currently only control and message queues
	 * are supported and not event queue
	 */
#if defined (CONFIG_NVS_PRESENT) && defined (CONFIG_NVGPU_GSP_SCHEDULER)
	if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
		if ((nvgpu_gsp_is_ready(g) == true) &&
				(num_queue == NVGPU_NVS_NUM_CONTROL)) {
			err = nvgpu_gsp_sched_send_queue_info(g, queue, queue_direction);
		}
	}
#endif
	if (err != 0) {
		goto fail;
	}