gpu: nvgpu: gsp: migration from KMD to GSP

Changes:
- submit shadow domain for legacy use cases in case the user domain is not
present.
- disable config flags for KMD to submit the user domain.

Bug 3935433
NVGPU-9664

Change-Id: I498226df36d0b482d1af369526adb369d921b6ca
Signed-off-by: vivekku <vivekku@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2843968
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
vivekku
2023-01-15 14:31:47 +00:00
committed by mobile promotions
parent 35960f8f40
commit a2a86eed27
6 changed files with 16 additions and 8 deletions

View File

@@ -698,6 +698,7 @@ static int nvgpu_runlist_do_update(struct gk20a *g, struct nvgpu_runlist *rl,
ret = nvgpu_runlist_update_locked(g, rl, domain, ch, add, wait_for_finish); ret = nvgpu_runlist_update_locked(g, rl, domain, ch, add, wait_for_finish);
if (ret == 0) { if (ret == 0) {
#if defined(CONFIG_NVS_PRESENT) #if defined(CONFIG_NVS_PRESENT)
ret = nvgpu_rl_domain_sync_submit(g, rl, rl->domain, wait_for_finish);
/* /*
* This path(CONFIG_KMD_SCHEDULING_WORKER_THREAD) contains the CPU based * This path(CONFIG_KMD_SCHEDULING_WORKER_THREAD) contains the CPU based
* Manual mode scheduler. With GSP enabled, this will be no longer required * Manual mode scheduler. With GSP enabled, this will be no longer required

View File

@@ -67,6 +67,7 @@ static void nvs_control_atomic64_write(void *address, u64 value)
nvgpu_wmb(); nvgpu_wmb();
} }
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
void nvs_control_fifo_sender_write_message(struct nvs_control_fifo_sender *const sender, void nvs_control_fifo_sender_write_message(struct nvs_control_fifo_sender *const sender,
const u32 msg_number, const u32 msg_sequence_tag, const u32 msg_number, const u32 msg_sequence_tag,
const u64 msg_timestamp_ns) const u64 msg_timestamp_ns)
@@ -320,3 +321,4 @@ void nvs_control_fifo_disable_flow_control(struct nvs_domain_msg_fifo_control *c
nvs_control_atomic_write(&control_interface->get, nvs_control_atomic_write(&control_interface->get,
NVS_DOMAIN_MSG_FIFO_CONTROL_GET_FLOW_CTRL_DISABLED); NVS_DOMAIN_MSG_FIFO_CONTROL_GET_FLOW_CTRL_DISABLED);
} }
#endif

View File

@@ -38,12 +38,11 @@ static struct nvs_sched_ops nvgpu_nvs_ops = {
.recover = NULL, .recover = NULL,
}; };
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
#ifndef NSEC_PER_MSEC #ifndef NSEC_PER_MSEC
#define NSEC_PER_MSEC 1000000U #define NSEC_PER_MSEC 1000000U
#endif #endif
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
/* /*
* TODO: make use of worker items when * TODO: make use of worker items when
* 1) the active domain gets modified * 1) the active domain gets modified
@@ -683,9 +682,9 @@ void nvgpu_nvs_remove_support(struct gk20a *g)
/* never powered on to init anything */ /* never powered on to init anything */
return; return;
} }
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
nvgpu_nvs_worker_deinit(g); nvgpu_nvs_worker_deinit(g);
#endif
nvs_domain_for_each(sched->sched, nvs_dom) { nvs_domain_for_each(sched->sched, nvs_dom) {
struct nvgpu_nvs_domain *nvgpu_dom = nvs_dom->priv; struct nvgpu_nvs_domain *nvgpu_dom = nvs_dom->priv;
if (nvgpu_dom->ref != 1U) { if (nvgpu_dom->ref != 1U) {
@@ -787,7 +786,10 @@ int nvgpu_nvs_open(struct gk20a *g)
} }
g->nvs_worker_submit = nvgpu_nvs_worker_submit; g->nvs_worker_submit = nvgpu_nvs_worker_submit;
#endif
unlock: unlock:
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
if (err) { if (err) {
nvs_dbg(g, " Failed! Error code: %d", err); nvs_dbg(g, " Failed! Error code: %d", err);
if (g->scheduler) { if (g->scheduler) {

View File

@@ -301,7 +301,7 @@ struct nvgpu_nvs_domain_ctrl_fifo *nvgpu_nvs_ctrl_fifo_create(struct gk20a *g)
return sched; return sched;
} }
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
void nvgpu_nvs_domain_ctrl_fifo_set_receiver(struct gk20a *g, void nvgpu_nvs_domain_ctrl_fifo_set_receiver(struct gk20a *g,
struct nvs_control_fifo_receiver *receiver) struct nvs_control_fifo_receiver *receiver)
{ {
@@ -355,6 +355,7 @@ struct nvs_control_fifo_sender *nvgpu_nvs_domain_ctrl_fifo_get_sender(struct gk2
return sched_ctrl->queues.receiver_queue_sender; return sched_ctrl->queues.receiver_queue_sender;
} }
#endif
bool nvgpu_nvs_ctrl_fifo_is_enabled(struct gk20a *g) bool nvgpu_nvs_ctrl_fifo_is_enabled(struct gk20a *g)
{ {
@@ -618,6 +619,7 @@ void nvgpu_nvs_buffer_free(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
/* Sets buf->valid as false */ /* Sets buf->valid as false */
(void)memset(buf, 0, sizeof(*buf)); (void)memset(buf, 0, sizeof(*buf));
(void)mask;
} }
void nvgpu_nvs_ctrl_fifo_lock_queues(struct gk20a *g) void nvgpu_nvs_ctrl_fifo_lock_queues(struct gk20a *g)

View File

@@ -70,7 +70,7 @@ struct nvs_control_fifo_receiver {
u32 get_index; u32 get_index;
u64 num_queue_entries; u64 num_queue_entries;
}; };
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
void nvs_control_fifo_sender_write_message(struct nvs_control_fifo_sender *const sender, void nvs_control_fifo_sender_write_message(struct nvs_control_fifo_sender *const sender,
u32 const msg_number, u32 const msg_sequence_tag, u32 const msg_number, u32 const msg_sequence_tag,
u64 const msg_timestamp_ns); u64 const msg_timestamp_ns);
@@ -92,5 +92,6 @@ void nvs_control_fifo_sender_exit(struct gk20a *g,
void nvs_control_fifo_enable_flow_control(struct nvs_domain_msg_fifo_control *control_interface, void nvs_control_fifo_enable_flow_control(struct nvs_domain_msg_fifo_control *control_interface,
u32 get_index); u32 get_index);
void nvs_control_fifo_disable_flow_control(struct nvs_domain_msg_fifo_control *control_interface); void nvs_control_fifo_disable_flow_control(struct nvs_domain_msg_fifo_control *control_interface);
#endif
#endif #endif

View File

@@ -461,8 +461,8 @@ done:
#endif #endif
struct unit_module_test nvgpu_nvs_tests[] = { struct unit_module_test nvgpu_nvs_tests[] = {
UNIT_TEST(init_support, test_fifo_init_support, &nvs_context, 0),
#ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD #ifdef CONFIG_KMD_SCHEDULING_WORKER_THREAD
UNIT_TEST(init_support, test_fifo_init_support, &nvs_context, 0),
UNIT_TEST(setup_sw, test_nvs_setup_sw, &nvs_context, 0), UNIT_TEST(setup_sw, test_nvs_setup_sw, &nvs_context, 0),
UNIT_TEST(nvs_remove_support, test_nvs_remove_sw, &nvs_context, 0), UNIT_TEST(nvs_remove_support, test_nvs_remove_sw, &nvs_context, 0),
#endif #endif