gpu: nvgpu: gsp: call runlist update and send ctrl fifo info

Changes:
- add function calls to add and delete domains
- update the runlist
- integrate the control fifo changes with the ioctls that send queue info
  to GSP FW (the common gating pattern is sketched below, after the commit
  metadata)

Bug 3884011

Change-Id: I5ad29eb9501cc2df66843c074ee6a00aae91af23
Signed-off-by: vivekku <vivekku@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2826482
Reviewed-by: Ramesh Mylavarapu <rmylavarapu@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Author:    vivekku
Date:      2022-12-12 20:18:47 +00:00
Committed: mobile promotions
Parent:    6b2c080f8f
Commit:    35960f8f40
10 changed files with 183 additions and 86 deletions
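
A reading aid before the per-file diffs: every new GSP call site in this change
is wrapped in the same guard. Below is a minimal sketch of that gating pattern,
lifted from the runlist-update hunks further down (illustrative only, not an
addition to the commit itself):

    #if defined(CONFIG_NVGPU_GSP_SCHEDULER) && defined(CONFIG_NVS_PRESENT)
        /*
         * Forward work to the GSP scheduler only when the feature flag is
         * set, GSP has reported ready, and at least one user domain exists.
         */
        if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
            if (nvgpu_gsp_is_ready(g) && nvgpu_nvs_gsp_usr_domain_present(g)) {
                ret = nvgpu_gps_sched_update_runlist(g, domain, rl);
            }
        }
    #endif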

View File

@@ -555,6 +555,14 @@ int nvgpu_runlist_update_locked(struct gk20a *g, struct nvgpu_runlist *rl,
         }
 #ifdef CONFIG_NVS_ROUND_ROBIN_SCHEDULER_DISABLE
         nvgpu_runlist_swap_mem(g, rl->shadow_rl_domain);
+#if defined(CONFIG_NVGPU_GSP_SCHEDULER) && defined(CONFIG_NVS_PRESENT)
+        if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
+            if ((nvgpu_gsp_is_ready(g) == true)
+                    && (nvgpu_nvs_gsp_usr_domain_present(g) == true)) {
+                ret = nvgpu_gps_sched_update_runlist(g, rl->shadow_rl_domain, rl);
+            }
+        }
+#endif
 #endif
     }
@@ -565,6 +573,15 @@ int nvgpu_runlist_update_locked(struct gk20a *g, struct nvgpu_runlist *rl,
     }
     nvgpu_runlist_swap_mem(g, domain);
+    /* runlist update */
+#if defined(CONFIG_NVGPU_GSP_SCHEDULER) && defined(CONFIG_NVS_PRESENT)
+    if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
+        if ((nvgpu_gsp_is_ready(g) == true)
+                && (nvgpu_nvs_gsp_usr_domain_present(g) == true)) {
+            ret = nvgpu_gps_sched_update_runlist(g, domain, rl);
+        }
+    }
+#endif
 
     return ret;
 }
@@ -760,20 +777,26 @@ int nvgpu_rl_domain_sync_submit(struct gk20a *g, struct nvgpu_runlist *runlist,
         struct nvgpu_runlist_domain *next_domain, bool wait_for_finish)
 {
     int err = 0;
 
-    if (next_domain == NULL) {
-        next_domain = runlist->shadow_rl_domain;
-    }
-
-    runlist_submit_powered(g, runlist, next_domain);
-    if (wait_for_finish) {
-        err = nvgpu_runlist_wait_pending_legacy(g, runlist);
-        if (err != 0) {
-            nvgpu_err(g, "runlist %d update timeout", runlist->id);
-            /* trigger runlist update timeout recovery */
-            return err;
-        }
-    }
+#ifdef CONFIG_NVS_PRESENT
+    if (nvgpu_nvs_gsp_usr_domain_present(g) == false) {
+#endif
+        /* schedule runlists for only for shadow domains */
+        if (next_domain == NULL) {
+            next_domain = runlist->shadow_rl_domain;
+        }
+
+        runlist_submit_powered(g, runlist, next_domain);
+        if (wait_for_finish) {
+            err = nvgpu_runlist_wait_pending_legacy(g, runlist);
+            if (err != 0) {
+                nvgpu_err(g, "runlist %d update timeout", runlist->id);
+                /* trigger runlist update timeout recovery */
+                return err;
+            }
+        }
+#ifdef CONFIG_NVS_PRESENT
+    }
+#endif
 
     return err;
 }

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -24,7 +24,6 @@
 #include <nvgpu/log.h>
 #include <nvgpu/gsp.h>
 #include <nvgpu/gsp_sched.h>
-#include <nvgpu/nvs.h>
 
 #include "gsp_runlist.h"
 #include "gsp_scheduler.h"
@@ -33,88 +32,68 @@
 #ifdef CONFIG_NVS_PRESENT
 static int gsp_ctrl_fifo_get_queue_info(struct gk20a *g,
-        struct nvgpu_gsp_ctrl_fifo_info *ctrl_fifo, enum queue_type qtype)
+        struct nvgpu_gsp_ctrl_fifo_info *ctrl_fifo, struct nvgpu_nvs_ctrl_queue *queue,
+        enum nvgpu_nvs_ctrl_queue_direction queue_direction)
 {
     int err = 0;
-    u8 mask;
-    enum nvgpu_nvs_ctrl_queue_num queue_num;
-    enum nvgpu_nvs_ctrl_queue_direction queue_direction;
-    struct nvgpu_nvs_ctrl_queue *queue;
-
-    nvgpu_gsp_dbg(g, " ");
-
-    switch (qtype) {
-    case CONTROL_QUEUE:
-        mask = NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_WRITE;
-        queue_num = NVGPU_NVS_NUM_CONTROL;
-        queue_direction = NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER;
-        break;
-    case RESPONSE_QUEUE:
-        mask = NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_READ;
-        queue_num = NVGPU_NVS_NUM_CONTROL;
-        queue_direction = NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT;
-        break;
-    default:
-        nvgpu_err(g, "queue type invalid");
-        err = -EINVAL;
-        goto exit;
-    }
-
-    /* below functions will be removed/changed once UMD support is there. */
-    queue = nvgpu_nvs_ctrl_fifo_get_queue(g->sched_ctrl_fifo, queue_num,
-            queue_direction, &mask);
-    if (queue == NULL) {
-        nvgpu_err(g, "queue allocation failed");
-        err = -EFAULT;
-        goto exit;
-    }
-
-    /* below functions will be removed/changed once UMD support is there. */
-    err = nvgpu_nvs_buffer_alloc(g->sched_ctrl_fifo, NVS_QUEUE_DEFAULT_SIZE,
-            mask, queue);
-    if (err != 0) {
-        nvgpu_err(g, "gsp buffer allocation failed");
-        goto exit;
-    }
-
+    enum queue_type qtype;
+
+    if (queue_direction == NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER) {
+        qtype = CONTROL_QUEUE;
+    } else if (queue_direction == NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT) {
+        qtype = RESPONSE_QUEUE;
+    } else {
+        nvgpu_err(g, "GSP queue type not supported");
+        err = -EINVAL;
+        goto exit;
+    }
+
+    if (queue == NULL) {
+        nvgpu_err(g, "GSP ctrlfifo queue is null");
+        err = -EFAULT;
+        goto exit;
+    }
+
     ctrl_fifo->fifo_addr_lo = u64_lo32(queue->mem.gpu_va);
     ctrl_fifo->fifo_addr_hi = u64_hi32(queue->mem.gpu_va);
     ctrl_fifo->queue_size = GSP_CTRL_FIFO_QUEUE_SIZE;
     ctrl_fifo->queue_entries = GSP_CTRL_FIFO_QUEUE_ENTRIES;
     ctrl_fifo->qtype = qtype;
 
 exit:
     return err;
 }
 
 /* get and send the control fifo info to gsp */
-int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, enum queue_type qtype)
+int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue,
+        enum nvgpu_nvs_ctrl_queue_direction queue_direction)
 {
     int err = 0;
     struct nv_flcn_cmd_gsp cmd = { };
     struct nvgpu_gsp_ctrl_fifo_info ctrl_fifo = {};
 
-    nvgpu_gsp_dbg(g, " ");
-
-    /* below function will be removed/changed once UMD support is there. */
-    err = gsp_ctrl_fifo_get_queue_info(g, &ctrl_fifo, qtype);
+    /* getting the queue info */
+    err = gsp_ctrl_fifo_get_queue_info(g, &ctrl_fifo, queue, queue_direction);
     if (err != 0) {
         nvgpu_err(g, "getting fifo queue info failed");
         goto exit;
     }
 
+    /* updating the command with control fifo info */
     cmd.cmd.ctrl_fifo.fifo_addr_lo = ctrl_fifo.fifo_addr_lo;
     cmd.cmd.ctrl_fifo.fifo_addr_hi = ctrl_fifo.fifo_addr_hi;
     cmd.cmd.ctrl_fifo.queue_size = ctrl_fifo.queue_size;
     cmd.cmd.ctrl_fifo.qtype = ctrl_fifo.qtype;
     cmd.cmd.ctrl_fifo.queue_entries = ctrl_fifo.queue_entries;
 
+    /* sending control fifo info to GSP */
     err = gsp_send_cmd_and_wait_for_ack(g, &cmd, NV_GSP_UNIT_CONTROL_INFO_SEND,
             sizeof(struct nvgpu_gsp_ctrl_fifo_info));
     if (err != 0) {
         nvgpu_err(g, "sending control fifo queue to GSP failed");
     }
 
 exit:
     return err;
 }
 #endif /* CONFIG_NVS_PRESENT*/

View File

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -57,5 +57,4 @@ struct nvgpu_gsp_ctrl_fifo_info {
      */
     u32 qtype;
 };
-int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, enum queue_type qtype);
 #endif/* GSP_CTRL_FIFO_H */

View File

@@ -246,3 +246,8 @@ int nvgpu_gsp_sched_bind_ctx_reg(struct gk20a *g)
     return err;
 }
+
+bool nvgpu_gsp_is_ready(struct gk20a *g)
+{
+    return g->gsp_sched->gsp_ready;
+}

View File

@@ -113,7 +113,6 @@ int nvgpu_gsp_cmd_post(struct gk20a *g, struct nv_flcn_cmd_gsp *cmd,
         err = -EINVAL;
         goto exit;
     }
-
     /* Sanity check the command input. */
     if (!gsp_validate_cmd(gsp_sched, cmd, queue_id)) {
         err = -EINVAL;

View File

@@ -1017,7 +1017,6 @@ int nvgpu_finalize_poweron(struct gk20a *g)
         NVGPU_INIT_TABLE_ENTRY(g->ops.pmu.pmu_restore_golden_img_state,
             NO_FLAG),
 #endif
-        NVGPU_INIT_TABLE_ENTRY(nvgpu_nvs_init, NO_FLAG),
         NVGPU_INIT_TABLE_ENTRY(g->ops.channel.resume_all_serviceable_ch,
             NO_FLAG),
 #ifdef CONFIG_NVGPU_GSP_SCHEDULER
@@ -1031,6 +1030,7 @@ int nvgpu_finalize_poweron(struct gk20a *g)
             NVGPU_SUPPORT_GSP_STEST),
 #endif
 #endif
+        NVGPU_INIT_TABLE_ENTRY(nvgpu_nvs_init, NO_FLAG),
     };
     size_t i;

View File

@@ -615,6 +615,15 @@ static int nvgpu_nvs_gen_shadow_domain(struct gk20a *g)
     /* Set active_domain to shadow_domain during Init */
     g->scheduler->active_domain = g->scheduler->shadow_domain;
+#ifdef CONFIG_NVGPU_GSP_SCHEDULER
+    if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
+        err = nvgpu_gsp_nvs_add_domain(g, nvgpu_dom->id);
+        if (err != 0) {
+            nvgpu_err(g, "add domain for shadow domain failed");
+        }
+    }
+#endif
 
 error:
     return err;
 }
@@ -623,6 +632,7 @@ static void nvgpu_nvs_remove_shadow_domain(struct gk20a *g)
 {
     struct nvgpu_nvs_scheduler *sched = g->scheduler;
     struct nvs_domain *nvs_dom;
+    s32 err = 0;
 
     if (sched == NULL) {
         /* never powered on to init anything */
@@ -637,9 +647,11 @@ static void nvgpu_nvs_remove_shadow_domain(struct gk20a *g)
         nvgpu_warn(g,
             "domain %llu is still in use during shutdown! refs: %u",
             sched->shadow_domain->id, sched->shadow_domain->ref);
+        nvgpu_err(g, "%u", err);
     }
 
     nvs_dom = sched->shadow_domain->parent;
     nvs_domain_destroy(sched->sched, nvs_dom);
     nvgpu_kfree(g, sched->shadow_domain->rl_domains);
@@ -713,6 +725,14 @@ int nvgpu_nvs_open(struct gk20a *g)
     if (nvgpu_is_enabled(g, NVGPU_SUPPORT_KMD_SCHEDULING_WORKER_THREAD)) {
         nvgpu_nvs_worker_resume(g);
     }
+#endif
+#ifdef CONFIG_NVGPU_GSP_SCHEDULER
+    if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
+        err = nvgpu_gsp_nvs_add_domain(g, U64_MAX);
+        if (err != 0) {
+            nvgpu_err(g, "add domain for shadow domain failed");
+        }
+    }
 #endif
 
     return err;
 }
@@ -781,6 +801,17 @@ unlock:
     }
 #endif
+#ifdef CONFIG_NVGPU_GSP_SCHEDULER
+    if (err != 0) {
+        if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
+            err = nvgpu_gsp_nvs_delete_domain(g, g->scheduler->shadow_domain->id);
+            if (err != 0) {
+                nvgpu_err(g, "delete domain for shadow domain failed");
+            }
+        }
+    }
+#endif
 
     nvgpu_mutex_release(&g->sched_mutex);
     return err;
@@ -876,6 +907,16 @@ int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u64 timeslice,
     nvgpu_dom->parent = nvs_dom;
+#ifdef CONFIG_NVGPU_GSP_SCHEDULER
+    if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
+        err = nvgpu_gsp_nvs_add_domain(g, nvgpu_dom->id);
+        if (err != 0) {
+            nvgpu_err(g, "sending domain info to gsp failed");
+            goto unlock;
+        }
+    }
+#endif
 
     *pdomain = nvgpu_dom;
 unlock:
     nvgpu_mutex_release(&g->sched_mutex);
@@ -1036,6 +1077,14 @@ int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id)
     nvs_dom = nvgpu_dom->parent;
+#ifdef CONFIG_NVGPU_GSP_SCHEDULER
+    if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
+        err = nvgpu_gsp_nvs_delete_domain(g, dom_id);
+        if (err != 0) {
+            nvgpu_err(g, "failed to delete domain");
+        }
+    }
+#endif
 
     nvgpu_nvs_unlink_rl_domains(g, nvgpu_dom);
     nvgpu_nvs_delete_rl_domain_mem(g, nvgpu_dom);
     nvgpu_dom->ref = 0U;
@@ -1180,3 +1229,16 @@ exit:
     return err;
 }
 #endif
+
+#ifdef CONFIG_NVS_PRESENT
+bool nvgpu_nvs_gsp_usr_domain_present(struct gk20a *g)
+{
+    bool ret = false;
+
+    if (nvs_domain_count(g->scheduler->sched) > 0U) {
+        /* for count more than 0 user domains are present */
+        ret = true;
+    }
+
+    return ret;
+}
+#endif

View File

@@ -22,6 +22,9 @@
 #ifndef GSP_SCHED_H
 #define GSP_SCHED_H
 
+#include <nvgpu/nvs.h>
+
 struct gk20a;
 struct nvgpu_gsp_sched;
 struct nvgpu_runlist;
@@ -124,4 +127,9 @@ int nvgpu_gsp_nvs_update_runlist(struct gk20a *g, const char *name,struct nvgpu_
 int nvgpu_gps_sched_update_runlist(struct gk20a *g,
     struct nvgpu_runlist_domain *domain, struct nvgpu_runlist *rl);
 int nvgpu_gsp_sched_bind_ctx_reg(struct gk20a *g);
+bool nvgpu_gsp_is_ready(struct gk20a *g);
+#ifdef CONFIG_NVS_PRESENT
+int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue,
+    enum nvgpu_nvs_ctrl_queue_direction queue_direction);
+#endif
 #endif /* GSP_SCHED_H */

View File

@@ -929,4 +929,9 @@ s32 nvgpu_nvs_gsp_get_runlist_domain_info(struct gk20a *g, u64 nvgpu_domain_id,
 s32 nvgpu_nvs_get_gsp_domain_info(struct gk20a *g, u64 nvgpu_domain_id,
     u32 *domain_id, u32 *timeslice_ns);
 #endif
+#ifdef CONFIG_NVS_PRESENT
+/* function to chech if user domain are present or only shadow domain
+ * exists if domain_list::nr = 0 if only shadow domain is present */
+bool nvgpu_nvs_gsp_usr_domain_present(struct gk20a *g);
+#endif
 #endif

View File

@@ -28,6 +28,10 @@
 #include <nvs/nvs_sched.h>
 #include <nvs/domain.h>
 
+#if defined (CONFIG_NVS_PRESENT) && defined (CONFIG_NVGPU_GSP_SCHEDULER)
+#include <nvgpu/gsp_sched.h>
+#endif
+
 #include "ioctl.h"
 #include "dmabuf_nvs.h"
@@ -905,6 +909,19 @@ static int nvgpu_nvs_ctrl_fifo_create_queue(struct gk20a *g,
         err = nvgpu_nvs_get_buf(g, queue, read_only);
     }
+    /*
+     * sending control fifo info to GSP scheduler
+     * currently only control and message queues
+     * are supported and not event queue
+     */
+#if defined (CONFIG_NVS_PRESENT) && defined (CONFIG_NVGPU_GSP_SCHEDULER)
+    if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
+        if ((nvgpu_gsp_is_ready(g) == true) &&
+            (num_queue == NVGPU_NVS_NUM_CONTROL)) {
+            err = nvgpu_gsp_sched_send_queue_info(g, queue, queue_direction);
+        }
+    }
+#endif
     if (err != 0) {
         goto fail;
     }
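
A usage note on the new API shape above: a minimal, hypothetical sketch of a
kernel-side caller pushing an already-created control queue to GSP. The wrapper
function name is made up for illustration; the nvgpu_* calls, flags, and enum
values are the ones added or used by this commit:

    /* Hypothetical helper; not part of the commit. */
    static int send_ctrl_queue_to_gsp(struct gk20a *g,
            struct nvgpu_nvs_ctrl_queue *queue)
    {
        int err = 0;

        /* Same gate the ioctl path uses: feature enabled and GSP ready. */
        if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED)) &&
                nvgpu_gsp_is_ready(g)) {
            /* The direction selects CONTROL_QUEUE vs RESPONSE_QUEUE inside
             * gsp_ctrl_fifo_get_queue_info(). */
            err = nvgpu_gsp_sched_send_queue_info(g, queue,
                    NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER);
        }

        return err;
    }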