gpu: nvgpu: gsp sched: enable gsp sw init for safety build

Changes
1. Remove the dGPU flag dependency for calling gsp sw init on ToT.
2. Created an enable flag for the GSP scheduler so it can be enabled on
ga10b platforms.
3. The engine queue config flag was previously enabled only on
dGPU-enabled platforms; since GSP uses the engine functions, it needs
to be enabled for all builds with the GSP scheduler enabled.
4. Changes in gsp_sequence_init/de_init, where we are seeing issues on
QNX.

NVGPU-9297

Change-Id: Ia4bce85ae8fd2794da1553e9ea418c76845a10ac
Signed-off-by: rmylavarapu <rmylavarapu@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2822537
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
rmylavarapu
2022-12-06 11:39:33 +00:00
committed by mobile promotions
parent 398a30a546
commit 01eb416745
14 changed files with 83 additions and 56 deletions

View File

@@ -132,6 +132,16 @@ NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_CHECKER=1
# NVGPU_COMMON_CFLAGS += \ # NVGPU_COMMON_CFLAGS += \
# -DCONFIG_NVGPU_CTXSW_FW_ERROR_HEADER_TESTING # -DCONFIG_NVGPU_CTXSW_FW_ERROR_HEADER_TESTING
# Enable gsp scheduler for safety build
ifneq ($(NVGPU_HVRTOS),1)
CONFIG_NVGPU_GSP_SCHEDULER := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_GSP_SCHEDULER
endif
# used by sec2/gsp code
CONFIG_NVGPU_ENGINE_QUEUE := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_ENGINE_QUEUE
ifeq ($(CONFIG_NVGPU_DGPU),1) ifeq ($(CONFIG_NVGPU_DGPU),1)
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_DGPU NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_DGPU
@@ -139,10 +149,6 @@ NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_DGPU
CONFIG_NVGPU_NVLINK := 1 CONFIG_NVGPU_NVLINK := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_NVLINK NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_NVLINK
# used by sec2 code
CONFIG_NVGPU_ENGINE_QUEUE := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_ENGINE_QUEUE
# used in ce_app # used in ce_app
CONFIG_NVGPU_FENCE := 1 CONFIG_NVGPU_FENCE := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_FENCE NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_FENCE
@@ -220,9 +226,6 @@ endif
CONFIG_NVGPU_ACR_LEGACY := 1 CONFIG_NVGPU_ACR_LEGACY := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_ACR_LEGACY NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_ACR_LEGACY
CONFIG_NVGPU_ENGINE_QUEUE := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_ENGINE_QUEUE
CONFIG_NVGPU_DEBUGGER := 1 CONFIG_NVGPU_DEBUGGER := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_DEBUGGER NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_DEBUGGER
@@ -368,8 +371,14 @@ NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_MIG
endif endif
# Enable gsp scheduler for normal build # Enable gsp scheduler for normal build
ifneq ($(NVGPU_HVRTOS),1)
CONFIG_NVGPU_GSP_SCHEDULER := 1 CONFIG_NVGPU_GSP_SCHEDULER := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_GSP_SCHEDULER NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_GSP_SCHEDULER
endif
# used by sec2/gsp code
CONFIG_NVGPU_ENGINE_QUEUE := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_ENGINE_QUEUE
# Code to encapsulate the error information into a uniform interface via # Code to encapsulate the error information into a uniform interface via
# nvgpu_err_msg is currently used, but might be needed in future by nvgpu_rm # nvgpu_err_msg is currently used, but might be needed in future by nvgpu_rm

View File

@@ -414,8 +414,10 @@ endif
ifeq ($(CONFIG_NVGPU_ENGINE_QUEUE),1) ifeq ($(CONFIG_NVGPU_ENGINE_QUEUE),1)
srcs += common/engine_queues/engine_mem_queue.c \ srcs += common/engine_queues/engine_mem_queue.c \
common/engine_queues/engine_dmem_queue.c \ common/engine_queues/engine_dmem_queue.c \
common/engine_queues/engine_emem_queue.c \ common/engine_queues/engine_emem_queue.c
common/engine_queues/engine_fb_queue.c ifneq ($(filter 1, $(CONFIG_NVGPU_DGPU) $(NVGPU_HVRTOS)),)
srcs += common/engine_queues/engine_fb_queue.c
endif
endif endif
ifeq ($(CONFIG_NVGPU_GRAPHICS),1) ifeq ($(CONFIG_NVGPU_GRAPHICS),1)

View File

@@ -50,14 +50,20 @@ static int engine_dmem_queue_pop(struct nvgpu_falcon *flcn,
struct gk20a *g = queue->g; struct gk20a *g = queue->g;
int err = 0; int err = 0;
#if (defined(CONFIG_NVGPU_FALCON_DEBUG) || defined(CONFIG_NVGPU_FALCON_NON_FUSA))
err = nvgpu_falcon_copy_from_dmem(flcn, src, data, size, 0); err = nvgpu_falcon_copy_from_dmem(flcn, src, data, size, 0);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "flcn-%d, queue-%d", queue->flcn_id, queue->id); nvgpu_err(g, "flcn-%d, queue-%d", queue->flcn_id, queue->id);
nvgpu_err(g, "dmem queue read failed"); nvgpu_err(g, "dmem queue read failed");
goto exit;
} }
#else
exit: (void)flcn;
(void)src;
(void)data;
(void)size;
(void)g;
err = -EINVAL;
#endif
return err; return err;
} }

View File

@@ -412,11 +412,9 @@ int nvgpu_engine_mem_queue_init(struct nvgpu_engine_mem_queue **queue_p,
case QUEUE_TYPE_DMEM: case QUEUE_TYPE_DMEM:
engine_dmem_queue_init(queue); engine_dmem_queue_init(queue);
break; break;
#ifdef CONFIG_NVGPU_DGPU
case QUEUE_TYPE_EMEM: case QUEUE_TYPE_EMEM:
engine_emem_queue_init(queue); engine_emem_queue_init(queue);
break; break;
#endif
default: default:
err = -EINVAL; err = -EINVAL;
break; break;

View File

@@ -556,11 +556,9 @@ int nvgpu_falcon_sw_init(struct gk20a *g, u32 flcn_id)
nvgpu_mutex_init(&flcn->imem_lock); nvgpu_mutex_init(&flcn->imem_lock);
nvgpu_mutex_init(&flcn->dmem_lock); nvgpu_mutex_init(&flcn->dmem_lock);
#ifdef CONFIG_NVGPU_DGPU
if (flcn->emem_supported) { if (flcn->emem_supported) {
nvgpu_mutex_init(&flcn->emem_lock); nvgpu_mutex_init(&flcn->emem_lock);
} }
#endif
return 0; return 0;
} }
@@ -582,11 +580,10 @@ void nvgpu_falcon_sw_free(struct gk20a *g, u32 flcn_id)
return; return;
} }
#ifdef CONFIG_NVGPU_DGPU
if (flcn->emem_supported) { if (flcn->emem_supported) {
nvgpu_mutex_destroy(&flcn->emem_lock); nvgpu_mutex_destroy(&flcn->emem_lock);
} }
#endif
nvgpu_mutex_destroy(&flcn->dmem_lock); nvgpu_mutex_destroy(&flcn->dmem_lock);
nvgpu_mutex_destroy(&flcn->imem_lock); nvgpu_mutex_destroy(&flcn->imem_lock);
} }

View File

@@ -85,6 +85,10 @@ int nvgpu_gsp_debug_buf_init(struct gk20a *g, u32 queue_no, u32 buffer_size)
nvgpu_err(g, "GSP debug init failed"); nvgpu_err(g, "GSP debug init failed");
} }
} }
#else
(void)queue_no;
(void)buffer_size;
#endif #endif
return err; return err;
} }

View File

@@ -84,7 +84,7 @@ void nvgpu_gsp_sched_sw_deinit(struct gk20a *g)
} }
if (gsp_sched->sequences != NULL) { if (gsp_sched->sequences != NULL) {
nvgpu_gsp_sequences_free(g, gsp_sched->sequences); nvgpu_gsp_sequences_free(g, &gsp_sched->sequences);
} }
nvgpu_gsp_queues_free(g, gsp_sched->queues); nvgpu_gsp_queues_free(g, gsp_sched->queues);
@@ -119,8 +119,6 @@ static int gsp_sched_wait_for_init(struct gk20a *g,
int nvgpu_gsp_sched_sw_init(struct gk20a *g) int nvgpu_gsp_sched_sw_init(struct gk20a *g)
{ {
int err = 0; int err = 0;
struct nvgpu_gsp_sched *gsp_sched;
struct nvgpu_gsp *gsp;
nvgpu_gsp_dbg(g, " "); nvgpu_gsp_dbg(g, " ");
@@ -149,17 +147,14 @@ int nvgpu_gsp_sched_sw_init(struct gk20a *g)
goto de_init; goto de_init;
} }
gsp_sched = g->gsp_sched;
gsp = g->gsp_sched->gsp;
/* gsp falcon software state */ /* gsp falcon software state */
gsp->gsp_flcn = &g->gsp_flcn; g->gsp_sched->gsp->gsp_flcn = &g->gsp_flcn;
gsp->g = g; g->gsp_sched->gsp->g = g;
/* Init isr mutex */ /* Init isr mutex */
nvgpu_mutex_init(&gsp->isr_mutex); nvgpu_mutex_init(&g->gsp_sched->gsp->isr_mutex);
err = nvgpu_gsp_sequences_init(g, gsp_sched); err = nvgpu_gsp_sequences_init(g, &g->gsp_sched);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "GSP sequences init failed"); nvgpu_err(g, "GSP sequences init failed");
goto de_init; goto de_init;

View File

@@ -30,60 +30,61 @@
#include "gsp_seq.h" #include "gsp_seq.h"
static void gsp_sequences_init(struct gk20a *g, static void gsp_sequences_init(struct gk20a *g,
struct gsp_sequences *sequences) struct gsp_sequences **sequences)
{ {
u16 i = 0; u16 i = 0;
nvgpu_gsp_dbg(g, " "); nvgpu_gsp_dbg(g, " ");
(void) memset(sequences->seq, 0, (void) memset((*sequences)->seq, 0,
sizeof(*sequences->seq) * GSP_MAX_NUM_SEQUENCES); sizeof(struct gsp_sequence) * GSP_MAX_NUM_SEQUENCES);
(void) memset(sequences->gsp_seq_tbl, 0, (void) memset((*sequences)->gsp_seq_tbl, 0,
sizeof(sequences->gsp_seq_tbl)); sizeof((*sequences)->gsp_seq_tbl));
for (i = 0; i < GSP_MAX_NUM_SEQUENCES; i++) { for (i = 0; i < GSP_MAX_NUM_SEQUENCES; i++) {
sequences->seq[i].id = (u8)i; (*sequences)->seq[i].id = (u8)i;
} }
} }
int nvgpu_gsp_sequences_init(struct gk20a *g, struct nvgpu_gsp_sched *gsp_sched) int nvgpu_gsp_sequences_init(struct gk20a *g, struct nvgpu_gsp_sched **gsp_sched)
{ {
int err = 0; int err = 0;
struct gsp_sequences *seqs; struct gsp_sequences *seqs = NULL;
nvgpu_gsp_dbg(g, " "); nvgpu_gsp_dbg(g, " ");
seqs = (struct gsp_sequences *) nvgpu_kzalloc(g, sizeof(*seqs->seq)); seqs = (struct gsp_sequences *) nvgpu_kzalloc(g, sizeof(struct gsp_sequences));
if (seqs == NULL) { if (seqs == NULL) {
nvgpu_err(g, "GSP sequences allocation failed"); nvgpu_err(g, "GSP sequences allocation failed");
return -ENOMEM; return -ENOMEM;
} }
seqs->seq = nvgpu_kzalloc(g, seqs->seq = (struct gsp_sequence *) nvgpu_kzalloc(g,
GSP_MAX_NUM_SEQUENCES * sizeof(*seqs->seq)); GSP_MAX_NUM_SEQUENCES * sizeof(struct gsp_sequence));
if (seqs->seq == NULL) { if (seqs->seq == NULL) {
nvgpu_err(g, "GSP sequence allocation failed"); nvgpu_err(g, "GSP sequence allocation failed");
nvgpu_kfree(g, seqs); nvgpu_kfree(g, seqs);
return -ENOMEM; return -ENOMEM;
} }
gsp_sched->sequences = seqs;
gsp_sched->sequences->seq = seqs->seq;
nvgpu_mutex_init(&seqs->gsp_seq_lock); nvgpu_mutex_init(&seqs->gsp_seq_lock);
gsp_sequences_init(g, seqs); gsp_sequences_init(g, &seqs);
(*gsp_sched)->sequences = seqs;
return err; return err;
} }
void nvgpu_gsp_sequences_free(struct gk20a *g, void nvgpu_gsp_sequences_free(struct gk20a *g,
struct gsp_sequences *sequences) struct gsp_sequences **sequences)
{ {
nvgpu_mutex_destroy(&sequences->gsp_seq_lock); nvgpu_mutex_destroy(&(*sequences)->gsp_seq_lock);
nvgpu_kfree(g, sequences->seq); nvgpu_kfree(g, ((*sequences)->seq));
nvgpu_kfree(g, sequences); (*sequences)->seq = NULL;
nvgpu_kfree(g, (*sequences));
*sequences = NULL;
} }
int nvgpu_gsp_seq_acquire(struct gk20a *g, int nvgpu_gsp_seq_acquire(struct gk20a *g,

View File

@@ -56,9 +56,9 @@ struct gsp_sequences {
struct nvgpu_mutex gsp_seq_lock; struct nvgpu_mutex gsp_seq_lock;
}; };
int nvgpu_gsp_sequences_init(struct gk20a *g, struct nvgpu_gsp_sched *gsp_sched); int nvgpu_gsp_sequences_init(struct gk20a *g, struct nvgpu_gsp_sched **gsp_sched);
void nvgpu_gsp_sequences_free(struct gk20a *g, void nvgpu_gsp_sequences_free(struct gk20a *g,
struct gsp_sequences *sequences); struct gsp_sequences **sequences);
int nvgpu_gsp_seq_acquire(struct gk20a *g, int nvgpu_gsp_seq_acquire(struct gk20a *g,
struct gsp_sequences *sequences, struct gsp_sequences *sequences,
struct gsp_sequence **pseq, struct gsp_sequence **pseq,

View File

@@ -363,9 +363,11 @@ int nvgpu_prepare_poweroff(struct gk20a *g)
nvgpu_err(g, "Failed to halt GSP stress test"); nvgpu_err(g, "Failed to halt GSP stress test");
} }
#endif #endif
#ifdef CONFIG_NVGPU_GSP_SCHEDULER
nvgpu_gsp_sched_suspend(g, g->gsp_sched);
#endif #endif
#ifdef CONFIG_NVGPU_GSP_SCHEDULER
if (nvgpu_is_enabled(g, NVGPU_SUPPORT_GSP_SCHED)) {
nvgpu_gsp_sched_suspend(g, g->gsp_sched);
}
#endif #endif
nvgpu_falcons_sw_free(g); nvgpu_falcons_sw_free(g);
@@ -990,13 +992,14 @@ int nvgpu_finalize_poweron(struct gk20a *g)
NVGPU_INIT_TABLE_ENTRY(nvgpu_nvs_init, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(nvgpu_nvs_init, NO_FLAG),
NVGPU_INIT_TABLE_ENTRY(g->ops.channel.resume_all_serviceable_ch, NVGPU_INIT_TABLE_ENTRY(g->ops.channel.resume_all_serviceable_ch,
NO_FLAG), NO_FLAG),
#ifndef CONFIG_NVGPU_DGPU
#ifdef CONFIG_NVGPU_GSP_SCHEDULER #ifdef CONFIG_NVGPU_GSP_SCHEDULER
/* Init gsp ops */ /* Init gsp ops */
NVGPU_INIT_TABLE_ENTRY(&nvgpu_gsp_sched_sw_init, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(&nvgpu_gsp_sched_sw_init, NVGPU_SUPPORT_GSP_SCHED),
#endif #endif
#ifndef CONFIG_NVGPU_DGPU
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST #ifdef CONFIG_NVGPU_GSP_STRESS_TEST
NVGPU_INIT_TABLE_ENTRY(&nvgpu_gsp_stress_test_sw_init, NO_FLAG), NVGPU_INIT_TABLE_ENTRY(&nvgpu_gsp_stress_test_sw_init,
NO_FLAG),
#endif #endif
#endif #endif
}; };

View File

@@ -183,7 +183,9 @@ static bool ga10b_gsp_is_interrupted(struct gk20a *g, u32 *intr)
static void ga10b_gsp_handle_swgen1_irq(struct gk20a *g) static void ga10b_gsp_handle_swgen1_irq(struct gk20a *g)
{ {
#ifdef CONFIG_NVGPU_FALCON_DEBUG
int err = 0; int err = 0;
#endif
struct nvgpu_falcon *flcn = NULL; struct nvgpu_falcon *flcn = NULL;
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");

View File

@@ -1955,6 +1955,13 @@ int ga10b_init_hal(struct gk20a *g)
nvgpu_set_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE, true); nvgpu_set_enabled(g, NVGPU_SUPPORT_PMU_SUPER_SURFACE, true);
} }
#ifdef CONFIG_NVGPU_GSP_SCHEDULER
/*
* enable gsp scheduler
*/
nvgpu_set_enabled(g, NVGPU_SUPPORT_GSP_SCHED, true);
#endif
/* /*
* enable GSP VM for gsp scheduler firmware * enable GSP VM for gsp scheduler firmware
*/ */

View File

@@ -229,6 +229,7 @@ struct gk20a;
"PES Floorsweeping"), \ "PES Floorsweeping"), \
DEFINE_FLAG(NVGPU_SCHED_EXIT_WAIT_FOR_ERRBAR_SUPPORTED, \ DEFINE_FLAG(NVGPU_SCHED_EXIT_WAIT_FOR_ERRBAR_SUPPORTED, \
"Implicit ERRBAR support"), \ "Implicit ERRBAR support"), \
DEFINE_FLAG(NVGPU_SUPPORT_GSP_SCHED, "To enable gsp sheduler"), \
DEFINE_FLAG(NVGPU_SUPPORT_MULTI_PROCESS_TSG_SHARING, \ DEFINE_FLAG(NVGPU_SUPPORT_MULTI_PROCESS_TSG_SHARING, \
"Multi process TSG sharing support"), \ "Multi process TSG sharing support"), \
DEFINE_FLAG(NVGPU_MAX_ENABLED_BITS, "Marks max number of flags"), DEFINE_FLAG(NVGPU_MAX_ENABLED_BITS, "Marks max number of flags"),

View File

@@ -1087,10 +1087,12 @@ void gk20a_remove_support(struct gk20a *g)
nvgpu_free_cyclestats_snapshot_data(g); nvgpu_free_cyclestats_snapshot_data(g);
#endif #endif
#ifndef CONFIG_NVGPU_DGPU
#ifdef CONFIG_NVGPU_GSP_SCHEDULER #ifdef CONFIG_NVGPU_GSP_SCHEDULER
nvgpu_gsp_sched_sw_deinit(g); if (nvgpu_is_enabled(g, NVGPU_SUPPORT_GSP_SCHED)) {
nvgpu_gsp_sched_sw_deinit(g);
}
#endif #endif
#ifndef CONFIG_NVGPU_DGPU
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST #ifdef CONFIG_NVGPU_GSP_STRESS_TEST
nvgpu_gsp_test_sw_deinit(g); nvgpu_gsp_test_sw_deinit(g);
#endif #endif