From 35960f8f4081cd4ebcef5509ce8926bfe2f14db1 Mon Sep 17 00:00:00 2001
From: vivekku
Date: Mon, 12 Dec 2022 20:18:47 +0000
Subject: [PATCH] gpu: nvgpu: gsp: call runlist update and send ctrl fifo info

Changes:
- add function calls to add and delete domains
- update the runlist
- integrate the control fifo changes with the ioctls that send queue
  info to the GSP FW

Bug 3884011

Change-Id: I5ad29eb9501cc2df66843c074ee6a00aae91af23
Signed-off-by: vivekku
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2826482
Reviewed-by: Ramesh Mylavarapu
Reviewed-by: Mahantesh Kumbar
GVS: Gerrit_Virtual_Submit
---
 drivers/gpu/nvgpu/common/fifo/runlist.c              |  47 +++++--
 .../common/gsp_scheduler/gsp_ctrl_fifo.c             | 119 ++++++++----------
 .../common/gsp_scheduler/gsp_ctrl_fifo.h             |   3 +-
 .../common/gsp_scheduler/gsp_scheduler.c             |   5 +
 drivers/gpu/nvgpu/common/gsp_scheduler/ipc/gsp_cmd.c |   1 -
 drivers/gpu/nvgpu/common/init/nvgpu_init.c           |   2 +-
 drivers/gpu/nvgpu/common/nvs/nvs_sched.c             |  62 +++++++++
 drivers/gpu/nvgpu/include/nvgpu/gsp_sched.h          |   8 ++
 drivers/gpu/nvgpu/include/nvgpu/nvs.h                |   5 +
 drivers/gpu/nvgpu/os/linux/ioctl_nvs.c               |  17 +++
 10 files changed, 183 insertions(+), 86 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/fifo/runlist.c b/drivers/gpu/nvgpu/common/fifo/runlist.c
index 369383384..ed5c1a2dc 100644
--- a/drivers/gpu/nvgpu/common/fifo/runlist.c
+++ b/drivers/gpu/nvgpu/common/fifo/runlist.c
@@ -555,6 +555,14 @@ int nvgpu_runlist_update_locked(struct gk20a *g, struct nvgpu_runlist *rl,
 	}
 #ifdef CONFIG_NVS_ROUND_ROBIN_SCHEDULER_DISABLE
 	nvgpu_runlist_swap_mem(g, rl->shadow_rl_domain);
+#if defined(CONFIG_NVGPU_GSP_SCHEDULER) && defined(CONFIG_NVS_PRESENT)
+	if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
+		if ((nvgpu_gsp_is_ready(g) == true)
+				&& (nvgpu_nvs_gsp_usr_domain_present(g) == true)) {
+			ret = nvgpu_gps_sched_update_runlist(g, rl->shadow_rl_domain, rl);
+		}
+	}
+#endif
 #endif
 	}
 
@@ -565,6 +573,15 @@ int nvgpu_runlist_update_locked(struct gk20a *g, struct nvgpu_runlist *rl,
 	}
 
 	nvgpu_runlist_swap_mem(g, domain);
+	/* runlist update */
+#if defined(CONFIG_NVGPU_GSP_SCHEDULER) && defined(CONFIG_NVS_PRESENT)
+	if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
+		if ((nvgpu_gsp_is_ready(g) == true)
+				&& (nvgpu_nvs_gsp_usr_domain_present(g) == true)) {
+			ret = nvgpu_gps_sched_update_runlist(g, domain, rl);
+		}
+	}
+#endif
 
 	return ret;
 }
@@ -760,20 +777,26 @@ int nvgpu_rl_domain_sync_submit(struct gk20a *g, struct nvgpu_runlist *runlist,
 		struct nvgpu_runlist_domain *next_domain, bool wait_for_finish)
 {
 	int err = 0;
-
-	if (next_domain == NULL) {
-		next_domain = runlist->shadow_rl_domain;
-	}
-
-	runlist_submit_powered(g, runlist, next_domain);
-	if (wait_for_finish) {
-		err = nvgpu_runlist_wait_pending_legacy(g, runlist);
-		if (err != 0) {
-			nvgpu_err(g, "runlist %d update timeout", runlist->id);
-			/* trigger runlist update timeout recovery */
-			return err;
+#ifdef CONFIG_NVS_PRESENT
+	if (nvgpu_nvs_gsp_usr_domain_present(g) == false) {
+#endif
+		/* schedule runlists only for shadow domains */
+		if (next_domain == NULL) {
+			next_domain = runlist->shadow_rl_domain;
 		}
+
+		runlist_submit_powered(g, runlist, next_domain);
+		if (wait_for_finish) {
+			err = nvgpu_runlist_wait_pending_legacy(g, runlist);
+			if (err != 0) {
+				nvgpu_err(g, "runlist %d update timeout", runlist->id);
+				/* trigger runlist update timeout recovery */
+				return err;
+			}
+		}
+#ifdef CONFIG_NVS_PRESENT
 	}
+#endif
 
 	return err;
 }
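
Note on the runlist.c hunks above: the forwarding of runlist updates to GSP
is gated on three conditions: the NVGPU_SUPPORT_GSP_SCHED enable flag, GSP
readiness, and the presence of at least one user domain. A minimal sketch of
that gating, factored into a helper (the helper name is hypothetical and not
part of this patch; the three calls are the ones the patch uses):

    /* Hypothetical helper summarizing the gating used in both hunks. */
    static bool gsp_runlist_update_allowed(struct gk20a *g)
    {
            return nvgpu_is_enabled(g, (u32)NVGPU_SUPPORT_GSP_SCHED) &&
                   nvgpu_gsp_is_ready(g) &&
                   nvgpu_nvs_gsp_usr_domain_present(g);
    }
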
diff --git a/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_ctrl_fifo.c b/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_ctrl_fifo.c
index bca0e61be..710e17ad9 100644
--- a/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_ctrl_fifo.c
+++ b/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_ctrl_fifo.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -24,7 +24,6 @@
 #include
 #include
 #include
-#include
 
 #include "gsp_runlist.h"
 #include "gsp_scheduler.h"
@@ -33,88 +32,68 @@
 
 #ifdef CONFIG_NVS_PRESENT
 static int gsp_ctrl_fifo_get_queue_info(struct gk20a *g,
-	struct nvgpu_gsp_ctrl_fifo_info *ctrl_fifo, enum queue_type qtype)
+	struct nvgpu_gsp_ctrl_fifo_info *ctrl_fifo, struct nvgpu_nvs_ctrl_queue *queue,
+	enum nvgpu_nvs_ctrl_queue_direction queue_direction)
 {
-	int err = 0;
-	u8 mask;
-	enum nvgpu_nvs_ctrl_queue_num queue_num;
-	enum nvgpu_nvs_ctrl_queue_direction queue_direction;
-	struct nvgpu_nvs_ctrl_queue *queue;
+	int err = 0;
+	enum queue_type qtype;
 
-	nvgpu_gsp_dbg(g, " ");
+	if (queue_direction == NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER) {
+		qtype = CONTROL_QUEUE;
+	} else if (queue_direction == NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT) {
+		qtype = RESPONSE_QUEUE;
+	} else {
+		nvgpu_err(g, "GSP queue type not supported");
+		err = -EINVAL;
+		goto exit;
+	}
 
-	switch (qtype) {
-	case CONTROL_QUEUE:
-		mask = NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_WRITE;
-		queue_num = NVGPU_NVS_NUM_CONTROL;
-		queue_direction = NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER;
-		break;
-	case RESPONSE_QUEUE:
-		mask = NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_READ;
-		queue_num = NVGPU_NVS_NUM_CONTROL;
-		queue_direction = NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT;
-		break;
-	default:
-		nvgpu_err(g, "queue type invalid");
-		err = -EINVAL;
-		goto exit;
-	}
-
-	/* below functions will be removed/changed once UMD support is there. */
-	queue = nvgpu_nvs_ctrl_fifo_get_queue(g->sched_ctrl_fifo, queue_num,
-			queue_direction, &mask);
-	if (queue == NULL) {
-		nvgpu_err(g, "queue allocation failed");
-		err = -EFAULT;
-		goto exit;
-	}
-	/* below functions will be removed/changed once UMD support is there. */
-	err = nvgpu_nvs_buffer_alloc(g->sched_ctrl_fifo, NVS_QUEUE_DEFAULT_SIZE,
-			mask, queue);
-	if (err != 0) {
-		nvgpu_err(g, "gsp buffer allocation failed");
-		goto exit;
-	}
-	ctrl_fifo->fifo_addr_lo = u64_lo32(queue->mem.gpu_va);
-	ctrl_fifo->fifo_addr_hi = u64_hi32(queue->mem.gpu_va);
-	ctrl_fifo->queue_size = GSP_CTRL_FIFO_QUEUE_SIZE;
-	ctrl_fifo->queue_entries = GSP_CTRL_FIFO_QUEUE_ENTRIES;
-	ctrl_fifo->qtype = qtype;
+	if (queue == NULL) {
+		nvgpu_err(g, "GSP ctrlfifo queue is null");
+		err = -EFAULT;
+		goto exit;
+	}
+	ctrl_fifo->fifo_addr_lo = u64_lo32(queue->mem.gpu_va);
+	ctrl_fifo->fifo_addr_hi = u64_hi32(queue->mem.gpu_va);
+	ctrl_fifo->queue_size = GSP_CTRL_FIFO_QUEUE_SIZE;
+	ctrl_fifo->queue_entries = GSP_CTRL_FIFO_QUEUE_ENTRIES;
+	ctrl_fifo->qtype = qtype;
 
 exit:
-	return err;
+	return err;
 }
 
 /* get and send the control fifo info to gsp */
-int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, enum queue_type qtype)
+int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue,
+	enum nvgpu_nvs_ctrl_queue_direction queue_direction)
 {
-	int err = 0;
-	struct nv_flcn_cmd_gsp cmd = { };
-	struct nvgpu_gsp_ctrl_fifo_info ctrl_fifo = {};
+	int err = 0;
+	struct nv_flcn_cmd_gsp cmd = { };
+	struct nvgpu_gsp_ctrl_fifo_info ctrl_fifo = {};
 
-	nvgpu_gsp_dbg(g, " ");
+	/* getting the queue info */
+	err = gsp_ctrl_fifo_get_queue_info(g, &ctrl_fifo, queue, queue_direction);
+	if (err != 0) {
+		nvgpu_err(g, "getting fifo queue info failed");
+		goto exit;
+	}
 
-	/* below function will be removed/changed once UMD support is there. */
-	err = gsp_ctrl_fifo_get_queue_info(g, &ctrl_fifo, qtype);
-	if (err != 0) {
-		nvgpu_err(g, "getting fifo queue info failed");
-		goto exit;
-	}
+	/* updating the command with control fifo info */
+	cmd.cmd.ctrl_fifo.fifo_addr_lo = ctrl_fifo.fifo_addr_lo;
+	cmd.cmd.ctrl_fifo.fifo_addr_hi = ctrl_fifo.fifo_addr_hi;
+	cmd.cmd.ctrl_fifo.queue_size = ctrl_fifo.queue_size;
+	cmd.cmd.ctrl_fifo.qtype = ctrl_fifo.qtype;
+	cmd.cmd.ctrl_fifo.queue_entries = ctrl_fifo.queue_entries;
 
-	cmd.cmd.ctrl_fifo.fifo_addr_lo = ctrl_fifo.fifo_addr_lo;
-	cmd.cmd.ctrl_fifo.fifo_addr_hi = ctrl_fifo.fifo_addr_hi;
-	cmd.cmd.ctrl_fifo.queue_size = ctrl_fifo.queue_size;
-	cmd.cmd.ctrl_fifo.qtype = ctrl_fifo.qtype;
-	cmd.cmd.ctrl_fifo.queue_entries = ctrl_fifo.queue_entries;
-
-	err = gsp_send_cmd_and_wait_for_ack(g, &cmd, NV_GSP_UNIT_CONTROL_INFO_SEND,
-			sizeof(struct nvgpu_gsp_ctrl_fifo_info));
-	if (err != 0) {
-		nvgpu_err(g, "sending control fifo queue to GSP failed");
-	}
+	/* sending control fifo info to GSP */
+	err = gsp_send_cmd_and_wait_for_ack(g, &cmd, NV_GSP_UNIT_CONTROL_INFO_SEND,
+			sizeof(struct nvgpu_gsp_ctrl_fifo_info));
+	if (err != 0) {
+		nvgpu_err(g, "sending control fifo queue to GSP failed");
+	}
 
 exit:
-	return err;
+	return err;
 }
 #endif /* CONFIG_NVS_PRESENT*/
\ No newline at end of file
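
Note on gsp_ctrl_fifo.c: gsp_ctrl_fifo_get_queue_info() no longer allocates
the queue itself; it derives the GSP queue type from the NVS queue direction
(client-to-scheduler maps to CONTROL_QUEUE, scheduler-to-client to
RESPONSE_QUEUE) and reads the GPU VA from a queue the caller already owns. A
usage sketch under that assumption (the caller holds an allocated
nvgpu_nvs_ctrl_queue, as the ioctl path at the end of this patch does):

    /* Sketch: report an existing client-to-scheduler queue to GSP. */
    err = nvgpu_gsp_sched_send_queue_info(g, queue,
                    NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER);
    if (err != 0) {
            nvgpu_err(g, "sending ctrl fifo info to GSP failed");
    }
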
diff --git a/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_ctrl_fifo.h b/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_ctrl_fifo.h
index 76d407cfc..5e5254501 100644
--- a/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_ctrl_fifo.h
+++ b/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_ctrl_fifo.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -57,5 +57,4 @@ struct nvgpu_gsp_ctrl_fifo_info {
 	 */
 	u32 qtype;
 };
-int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, enum queue_type qtype);
 #endif/* GSP_CTRL_FIFO_H */
\ No newline at end of file
diff --git a/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_scheduler.c b/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_scheduler.c
index de0cfc18f..a0fcd5075 100644
--- a/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_scheduler.c
+++ b/drivers/gpu/nvgpu/common/gsp_scheduler/gsp_scheduler.c
@@ -245,4 +245,9 @@ int nvgpu_gsp_sched_bind_ctx_reg(struct gk20a *g)
 			NV_GSP_UNIT_BIND_CTX_REG, 0);
 
 	return err;
+}
+
+bool nvgpu_gsp_is_ready(struct gk20a *g)
+{
+	return g->gsp_sched->gsp_ready;
 }
\ No newline at end of file
diff --git a/drivers/gpu/nvgpu/common/gsp_scheduler/ipc/gsp_cmd.c b/drivers/gpu/nvgpu/common/gsp_scheduler/ipc/gsp_cmd.c
index bbbdcbb16..719417683 100644
--- a/drivers/gpu/nvgpu/common/gsp_scheduler/ipc/gsp_cmd.c
+++ b/drivers/gpu/nvgpu/common/gsp_scheduler/ipc/gsp_cmd.c
@@ -113,7 +113,6 @@ int nvgpu_gsp_cmd_post(struct gk20a *g, struct nv_flcn_cmd_gsp *cmd,
 		err = -EINVAL;
 		goto exit;
 	}
-
 	/* Sanity check the command input. */
 	if (!gsp_validate_cmd(gsp_sched, cmd, queue_id)) {
 		err = -EINVAL;
diff --git a/drivers/gpu/nvgpu/common/init/nvgpu_init.c b/drivers/gpu/nvgpu/common/init/nvgpu_init.c
index b976d2f5e..790602637 100644
--- a/drivers/gpu/nvgpu/common/init/nvgpu_init.c
+++ b/drivers/gpu/nvgpu/common/init/nvgpu_init.c
@@ -1017,7 +1017,6 @@ int nvgpu_finalize_poweron(struct gk20a *g)
 		NVGPU_INIT_TABLE_ENTRY(g->ops.pmu.pmu_restore_golden_img_state,
 			NO_FLAG),
 #endif
-		NVGPU_INIT_TABLE_ENTRY(nvgpu_nvs_init, NO_FLAG),
 		NVGPU_INIT_TABLE_ENTRY(g->ops.channel.resume_all_serviceable_ch,
 			NO_FLAG),
 #ifdef CONFIG_NVGPU_GSP_SCHEDULER
@@ -1031,6 +1030,7 @@ int nvgpu_finalize_poweron(struct gk20a *g)
 			NVGPU_SUPPORT_GSP_STEST),
 #endif
 #endif
+		NVGPU_INIT_TABLE_ENTRY(nvgpu_nvs_init, NO_FLAG),
 	};
 
 	size_t i;
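
Note on the init-order change in nvgpu_init.c: the nvgpu_nvs_init entry
moves below the GSP scheduler entries in the poweron table, so that NVS
initialization can register its domains with an already-booted GSP. One
observation on the new accessor in gsp_scheduler.c: nvgpu_gsp_is_ready()
dereferences g->gsp_sched unconditionally. A more defensive variant would
look like the sketch below; this is an editorial sketch, not what the patch
implements:

    bool nvgpu_gsp_is_ready(struct gk20a *g)
    {
            /* sketch: also covers the case where GSP sched was never set up */
            return (g->gsp_sched != NULL) && g->gsp_sched->gsp_ready;
    }
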
refs: %u", sched->shadow_domain->id, sched->shadow_domain->ref); + nvgpu_err(g, "%u", err); } nvs_dom = sched->shadow_domain->parent; + nvs_domain_destroy(sched->sched, nvs_dom); nvgpu_kfree(g, sched->shadow_domain->rl_domains); @@ -713,6 +725,14 @@ int nvgpu_nvs_open(struct gk20a *g) if (nvgpu_is_enabled(g, NVGPU_SUPPORT_KMD_SCHEDULING_WORKER_THREAD)) { nvgpu_nvs_worker_resume(g); } +#endif +#ifdef CONFIG_NVGPU_GSP_SCHEDULER + if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) { + err = nvgpu_gsp_nvs_add_domain(g, U64_MAX); + if (err != 0) { + nvgpu_err(g, "add domain for shadow domain failed"); + } + } #endif return err; } @@ -781,6 +801,17 @@ unlock: } #endif +#ifdef CONFIG_NVGPU_GSP_SCHEDULER + if (err != 0) { + if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) { + err = nvgpu_gsp_nvs_delete_domain(g, g->scheduler->shadow_domain->id); + if (err != 0) { + nvgpu_err(g, "delete domain for shadow domain failed"); + } + } + } +#endif + nvgpu_mutex_release(&g->sched_mutex); return err; @@ -876,6 +907,16 @@ int nvgpu_nvs_add_domain(struct gk20a *g, const char *name, u64 timeslice, nvgpu_dom->parent = nvs_dom; +#ifdef CONFIG_NVGPU_GSP_SCHEDULER + if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) { + err = nvgpu_gsp_nvs_add_domain(g, nvgpu_dom->id); + if (err != 0) { + nvgpu_err(g, "sending domain info to gsp failed"); + goto unlock; + } + } +#endif + *pdomain = nvgpu_dom; unlock: nvgpu_mutex_release(&g->sched_mutex); @@ -1036,6 +1077,14 @@ int nvgpu_nvs_del_domain(struct gk20a *g, u64 dom_id) nvs_dom = nvgpu_dom->parent; +#ifdef CONFIG_NVGPU_GSP_SCHEDULER + if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) { + err = nvgpu_gsp_nvs_delete_domain(g, dom_id); + if (err != 0) { + nvgpu_err(g, "failed to delete domain"); + } + } +#endif nvgpu_nvs_unlink_rl_domains(g, nvgpu_dom); nvgpu_nvs_delete_rl_domain_mem(g, nvgpu_dom); nvgpu_dom->ref = 0U; @@ -1180,3 +1229,16 @@ exit: return err; } #endif + +#ifdef CONFIG_NVS_PRESENT +bool nvgpu_nvs_gsp_usr_domain_present(struct gk20a *g) +{ + bool ret = false; + + if (nvs_domain_count(g->scheduler->sched) > 0U) { + /* for count more than 0 user domains are present */ + ret = true; + } + return ret; +} +#endif \ No newline at end of file diff --git a/drivers/gpu/nvgpu/include/nvgpu/gsp_sched.h b/drivers/gpu/nvgpu/include/nvgpu/gsp_sched.h index 74dac8c47..d999e8e26 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/gsp_sched.h +++ b/drivers/gpu/nvgpu/include/nvgpu/gsp_sched.h @@ -22,6 +22,9 @@ #ifndef GSP_SCHED_H #define GSP_SCHED_H + +#include + struct gk20a; struct nvgpu_gsp_sched; struct nvgpu_runlist; @@ -124,4 +127,9 @@ int nvgpu_gsp_nvs_update_runlist(struct gk20a *g, const char *name,struct nvgpu_ int nvgpu_gps_sched_update_runlist(struct gk20a *g, struct nvgpu_runlist_domain *domain, struct nvgpu_runlist *rl); int nvgpu_gsp_sched_bind_ctx_reg(struct gk20a *g); +bool nvgpu_gsp_is_ready(struct gk20a *g); +#ifdef CONFIG_NVS_PRESENT +int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue, + enum nvgpu_nvs_ctrl_queue_direction queue_direction); +#endif #endif /* GSP_SCHED_H */ diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvs.h b/drivers/gpu/nvgpu/include/nvgpu/nvs.h index 47dfe62c5..d6b202f0b 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/nvs.h +++ b/drivers/gpu/nvgpu/include/nvgpu/nvs.h @@ -929,4 +929,9 @@ s32 nvgpu_nvs_gsp_get_runlist_domain_info(struct gk20a *g, u64 nvgpu_domain_id, s32 nvgpu_nvs_get_gsp_domain_info(struct gk20a *g, u64 nvgpu_domain_id, u32 *domain_id, u32 *timeslice_ns); #endif +#ifdef 
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvs.h b/drivers/gpu/nvgpu/include/nvgpu/nvs.h
index 47dfe62c5..d6b202f0b 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/nvs.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvs.h
@@ -929,4 +929,9 @@ s32 nvgpu_nvs_gsp_get_runlist_domain_info(struct gk20a *g, u64 nvgpu_domain_id,
 s32 nvgpu_nvs_get_gsp_domain_info(struct gk20a *g, u64 nvgpu_domain_id,
 		u32 *domain_id, u32 *timeslice_ns);
 #endif
+#ifdef CONFIG_NVS_PRESENT
+/* check whether user domains are present or only the shadow domain
+ * exists; domain_list::nr == 0 means only the shadow domain is present */
+bool nvgpu_nvs_gsp_usr_domain_present(struct gk20a *g);
+#endif
 #endif
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_nvs.c b/drivers/gpu/nvgpu/os/linux/ioctl_nvs.c
index b7a769444..39d2d9570 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_nvs.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_nvs.c
@@ -28,6 +28,10 @@
 #include
 #include
 
+#if defined (CONFIG_NVS_PRESENT) && defined (CONFIG_NVGPU_GSP_SCHEDULER)
+#include
+#endif
+
 #include "ioctl.h"
 #include "dmabuf_nvs.h"
 
@@ -905,6 +909,19 @@ static int nvgpu_nvs_ctrl_fifo_create_queue(struct gk20a *g,
 		err = nvgpu_nvs_get_buf(g, queue, read_only);
 	}
 
+	/*
+	 * Send the control fifo info to the GSP scheduler.
+	 * Currently only the control and message queues are
+	 * supported, not the event queue.
+	 */
+#if defined (CONFIG_NVS_PRESENT) && defined (CONFIG_NVGPU_GSP_SCHEDULER)
+	if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
+		if ((nvgpu_gsp_is_ready(g) == true) &&
+				(num_queue == NVGPU_NVS_NUM_CONTROL)) {
+			err = nvgpu_gsp_sched_send_queue_info(g, queue, queue_direction);
+		}
+	}
+#endif
 	if (err != 0) {
 		goto fail;
 	}
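
Note on the ioctl path: after this patch, creating an NVS control queue from
user space also pushes the queue's address and geometry to GSP, but only when
GSP can accept it. The gating condition, condensed from the hunk above (the
event queue is deliberately excluded):

    /* Sketch: forward control-queue info to GSP only when it is usable. */
    if (nvgpu_is_enabled(g, (u32)NVGPU_SUPPORT_GSP_SCHED) &&
                    nvgpu_gsp_is_ready(g) &&
                    (num_queue == NVGPU_NVS_NUM_CONTROL)) {
            err = nvgpu_gsp_sched_send_queue_info(g, queue, queue_direction);
    }
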