gpu: nvgpu: gsp units separation

Separated the gsp unit into three units:
- GSP unit, which holds the core functionality of the GSP RISCV core:
  bootstrap, interrupt handling, etc.
- GSP Scheduler, which holds the cmd/msg management, IPC, etc.
- GSP Test, which holds the stress-test-ucode-specific support.

NVGPU-7492

Signed-off-by: Ramesh Mylavarapu <rmylavarapu@nvidia.com>
Change-Id: I12340dc776d610502f28c8574843afc7481c0871
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2660619
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Ramesh Mylavarapu
Date: 2022-02-03 17:37:07 +05:30
Committed by: mobile promotions
Parent: 14ed75e857
Commit: 9302b2efee
29 changed files with 871 additions and 630 deletions
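
The net effect of the split is easiest to see in the resulting top-level software states: one core struct owned by the GSP unit, plus two thin wrappers that each embed a core instance. A condensed sketch (field lists abridged and partly inferred from how the code in this diff uses them; the headers further down carry the authoritative definitions):

struct nvgpu_gsp {                    /* common/gsp: core RISCV state */
	struct gk20a *g;
	struct gsp_fw gsp_ucode;      /* fw blobs + file names to load */
	struct nvgpu_falcon *gsp_flcn;
	bool isr_enabled;
	struct nvgpu_mutex isr_mutex;
};

struct nvgpu_gsp_sched {              /* common/gsp_scheduler: cmd/msg IPC */
	struct nvgpu_gsp *gsp;
	struct gsp_sequences *sequences;
	struct nvgpu_engine_mem_queue *queues[GSP_QUEUE_NUM];
	u32 command_ack;
	bool gsp_ready;               /* set once the INIT msg arrives */
};

struct nvgpu_gsp_test {               /* common/gsp_test: stress-test state */
	struct nvgpu_gsp *gsp;
	struct gsp_stress_test gsp_test;
};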


@@ -315,22 +315,33 @@ gsp:
owner: Ramesh M
gpu: igpu
sources: [ common/gsp/gsp_init.c,
common/gsp/gsp_priv.h,
common/gsp/gsp_bootstrap.c,
common/gsp/gsp_bootstrap.h,
common/gsp/gsp_test.c,
include/nvgpu/gsp.h,
include/nvgpu/gsp/gsp_test.h,
common/gsp/ipc/gsp_seq.c,
common/gsp/ipc/gsp_seq.h,
common/gsp/ipc/gsp_queue.c,
common/gsp/ipc/gsp_queue.h,
common/gsp/ipc/gsp_cmd.c,
common/gsp/ipc/gsp_cmd.h,
common/gsp/ipc/gsp_msg.c,
common/gsp/ipc/gsp_msg.h,
include/nvgpu/gsp.h ]
gsp_sched:
safe: no
owner: Ramesh M
gpu: igpu
sources: [ common/gsp_scheduler/ipc/gsp_seq.c,
common/gsp_scheduler/ipc/gsp_seq.h,
common/gsp_scheduler/ipc/gsp_queue.c,
common/gsp_scheduler/ipc/gsp_queue.h,
common/gsp_scheduler/ipc/gsp_cmd.c,
common/gsp_scheduler/ipc/gsp_cmd.h,
common/gsp_scheduler/ipc/gsp_msg.c,
common/gsp_scheduler/ipc/gsp_msg.h,
common/gsp_scheduler/gsp_scheduler.c,
common/gsp_scheduler/gsp_scheduler.h,
include/nvgpu/gsp_sched.h ]
gsp_test:
safe: no
owner: Ramesh M
gpu: igpu
sources: [ include/nvgpu/gsp/gsp_test.h,
common/gsp_test/gsp_test.c,
common/gsp_test/gsp_test.h ]
engine_queues:
owner: Sagar K
children:


@@ -423,15 +423,16 @@ ifeq ($(CONFIG_NVGPU_GSP_SCHEDULER),y)
nvgpu-$(CONFIG_NVGPU_GSP_SCHEDULER) += \
common/gsp/gsp_init.o \
common/gsp/gsp_bootstrap.o \
common/gsp/ipc/gsp_seq.o \
common/gsp_scheduler/ipc/gsp_seq.o \
common/gsp/ipc/gsp_queue.o \
common/gsp_scheduler/ipc/gsp_queue.o \
common/gsp/ipc/gsp_cmd.o \
common/gsp_scheduler/ipc/gsp_cmd.o \
common/gsp/ipc/gsp_msg.o
common/gsp_scheduler/ipc/gsp_msg.o \
common/gsp_scheduler/gsp_scheduler.o
endif
ifeq ($(CONFIG_NVGPU_GSP_STRESS_TEST),y)
nvgpu-$(CONFIG_NVGPU_GSP_STRESS_TEST) += \
common/gsp/gsp_test.o
common/gsp_test/gsp_test.o
endif
# Linux specific parts of nvgpu.


@@ -301,8 +301,8 @@ CONFIG_NVGPU_MIG := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_MIG
# Enable gsp scheduler for normal build
CONFIG_NVGPU_GSP_SCHEDULER......:= 1
CONFIG_NVGPU_GSP_SCHEDULER := 1
NVGPU_COMMON_CFLAGS.............+= -DCONFIG_NVGPU_GSP_SCHEDULER
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_GSP_SCHEDULER
# Enable GSP stress test
ifeq ($(CONFIG_NVGPU_GSP_SCHEDULER),1)


@@ -181,10 +181,15 @@ srcs += common/device.c \
ifeq ($(CONFIG_NVGPU_GSP_SCHEDULER),1)
srcs += common/gsp/gsp_init.c \
common/gsp/gsp_bootstrap.c \
common/gsp/ipc/gsp_seq.c \
common/gsp_scheduler/ipc/gsp_seq.c \
common/gsp/ipc/gsp_queue.c \
common/gsp_scheduler/ipc/gsp_queue.c \
common/gsp/ipc/gsp_cmd.c \
common/gsp_scheduler/ipc/gsp_cmd.c \
common/gsp/ipc/gsp_msg.c
common/gsp_scheduler/ipc/gsp_msg.c \
common/gsp_scheduler/gsp_scheduler.c
endif
ifeq ($(CONFIG_NVGPU_GSP_STRESS_TEST),1)
srcs += common/gsp_test/gsp_test.c
endif
# Source files below are functionaly safe (FuSa) and must always be included.


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -28,25 +28,8 @@
#include <nvgpu/firmware.h>
#include <nvgpu/io.h>
#include <nvgpu/gsp.h>
#include "gsp_priv.h"
#include "gsp_bootstrap.h"
#define GSP_WAIT_TIME_MS 10000U
#define GSP_DBG_RISCV_FW_MANIFEST "sample-gsp.manifest.encrypt.bin.out.bin"
#define GSP_DBG_RISCV_FW_CODE "sample-gsp.text.encrypt.bin"
#define GSP_DBG_RISCV_FW_DATA "sample-gsp.data.encrypt.bin"
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
#define GSPDBG_RISCV_STRESS_TEST_FW_MANIFEST "gsp-stress.manifest.encrypt.bin.out.bin"
#include <nvgpu/gsp/gsp_test.h>
#define GSPDBG_RISCV_STRESS_TEST_FW_CODE "gsp-stress.text.encrypt.bin"
#define GSPDBG_RISCV_STRESS_TEST_FW_DATA "gsp-stress.data.encrypt.bin"
#define GSPPROD_RISCV_STRESS_TEST_FW_MANIFEST "gsp-stress.manifest.encrypt.bin.out.bin.prod"
#define GSPPROD_RISCV_STRESS_TEST_FW_CODE "gsp-stress.text.encrypt.bin.prod"
#define GSPPROD_RISCV_STRESS_TEST_FW_DATA "gsp-stress.data.encrypt.bin.prod"
#define GSP_STRESS_TEST_MAILBOX_PASS 0xAAAAAAAA
#endif
static void gsp_release_firmware(struct gk20a *g, struct nvgpu_gsp *gsp)
@@ -64,86 +47,45 @@ static void gsp_release_firmware(struct gk20a *g, struct nvgpu_gsp *gsp)
}
}
static int gsp_read_firmware(struct gk20a *g, struct gsp_fw *gsp_ucode)
static int gsp_read_firmware(struct gk20a *g, struct nvgpu_gsp *gsp,
struct gsp_fw *gsp_ucode)
{
const char *gsp_code_name;
const char *code_name = gsp_ucode->code_name;
const char *gsp_data_name;
const char *data_name = gsp_ucode->data_name;
const char *gsp_manifest_name;
const char *manifest_name = gsp_ucode->manifest_name;
nvgpu_log_fn(g, " ");
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
if (g->gsp->gsp_test.load_stress_test) {
/*
* TODO Switch to GSP specific register
*/
if (g->ops.pmu.is_debug_mode_enabled(g)) {
gsp_code_name = GSPDBG_RISCV_STRESS_TEST_FW_CODE;
gsp_data_name = GSPDBG_RISCV_STRESS_TEST_FW_DATA;
gsp_manifest_name = GSPDBG_RISCV_STRESS_TEST_FW_MANIFEST;
} else {
gsp_code_name = GSPPROD_RISCV_STRESS_TEST_FW_CODE;
gsp_data_name = GSPPROD_RISCV_STRESS_TEST_FW_DATA;
gsp_manifest_name = GSPPROD_RISCV_STRESS_TEST_FW_MANIFEST;
}
} else
#endif
{
gsp_code_name = GSP_DBG_RISCV_FW_CODE;
gsp_data_name = GSP_DBG_RISCV_FW_DATA;
gsp_manifest_name = GSP_DBG_RISCV_FW_MANIFEST;
}
gsp_ucode->manifest = nvgpu_request_firmware(g,
gsp_manifest_name, NVGPU_REQUEST_FIRMWARE_NO_WARN);
manifest_name, NVGPU_REQUEST_FIRMWARE_NO_WARN);
if (gsp_ucode->manifest == NULL) {
nvgpu_err(g, "%s ucode get failed", gsp_manifest_name);
nvgpu_err(g, "%s ucode get failed", manifest_name);
goto fw_release;
}
gsp_ucode->code = nvgpu_request_firmware(g,
gsp_code_name, NVGPU_REQUEST_FIRMWARE_NO_WARN);
code_name, NVGPU_REQUEST_FIRMWARE_NO_WARN);
if (gsp_ucode->code == NULL) {
nvgpu_err(g, "%s ucode get failed", gsp_code_name);
nvgpu_err(g, "%s ucode get failed", code_name);
goto fw_release;
}
gsp_ucode->data = nvgpu_request_firmware(g,
gsp_data_name, NVGPU_REQUEST_FIRMWARE_NO_WARN);
data_name, NVGPU_REQUEST_FIRMWARE_NO_WARN);
if (gsp_ucode->data == NULL) {
nvgpu_err(g, "%s ucode get failed", gsp_data_name);
nvgpu_err(g, "%s ucode get failed", data_name);
goto fw_release;
}
return 0;
fw_release:
gsp_release_firmware(g, g->gsp);
gsp_release_firmware(g, gsp);
return -ENOENT;
}
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
static void gsp_write_test_sysmem_addr(struct nvgpu_gsp *gsp)
{
struct gk20a *g;
struct nvgpu_falcon *flcn;
u64 sysmem_addr;
g = gsp->g;
flcn = gsp->gsp_flcn;
sysmem_addr = nvgpu_mem_get_addr(g, &gsp->gsp_test.gsp_test_sysmem_block);
nvgpu_falcon_mailbox_write(flcn, FALCON_MAILBOX_0, u64_lo32(sysmem_addr));
nvgpu_falcon_mailbox_write(flcn, FALCON_MAILBOX_1, u64_hi32(sysmem_addr));
}
#endif
static int gsp_ucode_load_and_bootstrap(struct gk20a *g,
struct nvgpu_falcon *flcn,
struct nvgpu_falcon *flcn, struct gsp_fw *gsp_ucode)
struct gsp_fw *gsp_ucode)
{
u32 dmem_size = 0U;
u32 code_size = gsp_ucode->code->size;
@@ -197,8 +139,9 @@ static int gsp_ucode_load_and_bootstrap(struct gk20a *g,
* Update the address of the allocated sysmem block in the
* mailbox register for stress test.
*/
if (g->gsp->gsp_test.load_stress_test)
if (nvgpu_gsp_get_stress_test_load(g)) {
gsp_write_test_sysmem_addr(g->gsp);
nvgpu_gsp_write_test_sysmem_addr(g);
}
#endif
g->ops.falcon.bootstrap(flcn, 0x0);
@@ -240,32 +183,23 @@ exit:
return -1;
}
static int gsp_wait_for_mailbox_update(struct nvgpu_gsp *gsp,
int nvgpu_gsp_wait_for_mailbox_update(struct nvgpu_gsp *gsp,
u32 mailbox_index, signed int timeoutms)
u32 mailbox_index, u32 exp_value, signed int timeoutms)
{
u32 mail_box_data = 0;
u32 pass_val = 0;
struct nvgpu_falcon *flcn = gsp->gsp_flcn;
nvgpu_log_fn(flcn->g, " ");
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
if (gsp->gsp_test.load_stress_test) {
pass_val = GSP_STRESS_TEST_MAILBOX_PASS;
}
#endif
do {
mail_box_data = flcn->g->ops.falcon.mailbox_read(
flcn, mailbox_index);
if (mail_box_data != 0U) {
if (mail_box_data == exp_value) {
if ((pass_val == 0U) || (mail_box_data == pass_val)) {
nvgpu_info(flcn->g,
"gsp mailbox-0 updated successful with 0x%x",
mail_box_data);
break;
}
}
if (timeoutms <= 0) {
nvgpu_err(flcn->g, "gsp mailbox check timedout");
@@ -280,7 +214,7 @@ static int gsp_wait_for_mailbox_update(struct nvgpu_gsp *gsp,
return 0;
}
int gsp_bootstrap_ns(struct gk20a *g, struct nvgpu_gsp *gsp)
int nvgpu_gsp_bootstrap_ns(struct gk20a *g, struct nvgpu_gsp *gsp)
{
int err = 0;
struct gsp_fw *gsp_ucode = &gsp->gsp_ucode;
@@ -288,46 +222,34 @@ int gsp_bootstrap_ns(struct gk20a *g, struct nvgpu_gsp *gsp)
nvgpu_log_fn(g, " ");
err = gsp_read_firmware(g, gsp_ucode);
err = gsp_read_firmware(g, gsp, gsp_ucode);
if (err != 0) {
nvgpu_err(g, "gsp firmware reading failed");
goto exit;
}
/* core reset */
err = nvgpu_falcon_reset(gsp->gsp_flcn);
err = nvgpu_falcon_reset(flcn);
if (err != 0) {
nvgpu_err(g, "gsp core reset failed err=%d", err);
goto exit;
}
/* Enable required interrupts support and isr */
nvgpu_gsp_isr_support(g, true);
nvgpu_gsp_isr_support(g, gsp, true);
/* setup falcon apertures */
err = gsp_ucode_load_and_bootstrap(g, flcn, gsp_ucode);
if (flcn->flcn_engine_dep_ops.setup_bootstrap_config != NULL) {
flcn->flcn_engine_dep_ops.setup_bootstrap_config(flcn->g);
}
err = gsp_ucode_load_and_bootstrap(g, gsp->gsp_flcn, gsp_ucode);
if (err != 0) {
nvgpu_err(g, "gsp load and bootstrap failed");
goto exit;
}
err = gsp_check_for_brom_completion(gsp->gsp_flcn, GSP_WAIT_TIME_MS);
err = gsp_check_for_brom_completion(flcn, GSP_WAIT_TIME_MS);
if (err != 0) {
nvgpu_err(g, "gsp BROM failed");
goto exit;
}
/* wait for mailbox-0 update with non-zero value */
err = gsp_wait_for_mailbox_update(gsp, 0x0, GSP_WAIT_TIME_MS);
if (err != 0) {
nvgpu_err(g, "gsp ucode failed to update mailbox-0");
}
exit:
gsp_release_firmware(g, g->gsp);
gsp_release_firmware(g, gsp);
return err;
}
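
With the stress-test firmware selection moved out of this file, nvgpu_gsp_bootstrap_ns() is now firmware-agnostic: each client unit fills in the ucode file names in gsp_fw before calling it. A minimal sketch of the calling pattern, modelled on the scheduler unit later in this diff (hypothetical wrapper name; error handling elided):

static int client_boot_gsp(struct gk20a *g, struct nvgpu_gsp *gsp)
{
	struct gsp_fw *fw = &gsp->gsp_ucode;

	/* pick the ucode set this client wants on the GSP core */
	fw->code_name = GSP_DBG_RISCV_FW_CODE;
	fw->data_name = GSP_DBG_RISCV_FW_DATA;
	fw->manifest_name = GSP_DBG_RISCV_FW_MANIFEST;

	/* generic path: read fw, reset core, enable irqs, load and go */
	return nvgpu_gsp_bootstrap_ns(g, gsp);
}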


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -26,103 +26,48 @@
#include <nvgpu/log.h>
#include <nvgpu/gsp.h>
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
#include <nvgpu/dma.h>
#include <nvgpu/gsp/gsp_test.h>
#endif
#ifdef CONFIG_NVGPU_GSP_SCHEDULER
#include <nvgpu/gsp_sched.h>
#endif
#include "ipc/gsp_seq.h"
void nvgpu_gsp_isr_support(struct gk20a *g, struct nvgpu_gsp *gsp, bool enable)
#include "ipc/gsp_queue.h"
#include "gsp_priv.h"
#include "gsp_bootstrap.h"
void nvgpu_gsp_isr_support(struct gk20a *g, bool enable)
{
nvgpu_log_fn(g, " ");
/* Enable irq*/
nvgpu_mutex_acquire(&g->gsp->isr_mutex);
nvgpu_mutex_acquire(&gsp->isr_mutex);
if (g->ops.gsp.enable_irq != NULL) {
g->ops.gsp.enable_irq(g, enable);
}
g->gsp->isr_enabled = enable;
gsp->isr_enabled = enable;
nvgpu_mutex_release(&g->gsp->isr_mutex);
nvgpu_mutex_release(&gsp->isr_mutex);
}
void nvgpu_gsp_suspend(struct gk20a *g)
void nvgpu_gsp_suspend(struct gk20a *g, struct nvgpu_gsp *gsp)
{
nvgpu_gsp_isr_support(g, false);
nvgpu_gsp_isr_support(g, gsp, false);
}
void nvgpu_gsp_sw_deinit(struct gk20a *g)
{
if (g->gsp != NULL) {
nvgpu_mutex_destroy(&g->gsp->isr_mutex);
#ifdef CONFIG_NVGPU_FALCON_DEBUG
nvgpu_falcon_dbg_buf_destroy(g->gsp->gsp_flcn);
nvgpu_falcon_dbg_error_print_enable(gsp->gsp_flcn, false);
#endif
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
nvgpu_dma_free(g, &g->gsp->gsp_test.gsp_test_sysmem_block);
#endif
nvgpu_gsp_sequences_free(g, g->gsp->sequences);
nvgpu_gsp_queues_free(g, g->gsp->queues);
g->gsp->gsp_ready = false;
nvgpu_kfree(g, g->gsp);
g->gsp = NULL;
}
}
int nvgpu_gsp_sw_init(struct gk20a *g)
void nvgpu_gsp_sw_deinit(struct gk20a *g, struct nvgpu_gsp *gsp)
{
int err = 0;
if (gsp != NULL) {
struct nvgpu_gsp *gsp;
nvgpu_log_fn(g, " ");
if (g->gsp != NULL) {
/*
* Recovery/unrailgate case, we do not need to do gsp init as
* gsp is set during cold boot & doesn't execute gsp clean up as
* part of power off sequence, so reuse to perform faster boot.
*/
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
nvgpu_gsp_stress_test_bootstrap(g, false);
#endif
return err;
}
/* Init struct holding the gsp software state */
g->gsp = (struct nvgpu_gsp *)nvgpu_kzalloc(g, sizeof(struct nvgpu_gsp));
if (g->gsp == NULL) {
err = -ENOMEM;
goto exit;
}
gsp = g->gsp;
gsp->g = g;
/* gsp falcon software state */
gsp->gsp_flcn = &g->gsp_flcn;
/* Init isr mutex */
nvgpu_mutex_init(&gsp->isr_mutex);
err = nvgpu_gsp_sequences_init(g, g->gsp);
if (err != 0) {
nvgpu_err(g, "GSP sequences init failed");
nvgpu_mutex_destroy(&gsp->isr_mutex);
nvgpu_kfree(g, g->gsp);
#ifdef CONFIG_NVGPU_FALCON_DEBUG
g->gsp = NULL;
nvgpu_falcon_dbg_buf_destroy(gsp->gsp_flcn);
}
#endif
exit:
nvgpu_kfree(g, gsp);
return err;
gsp = NULL;
}
}
int nvgpu_gsp_bootstrap(struct gk20a *g)
int nvgpu_gsp_debug_buf_init(struct gk20a *g, u32 queue_no, u32 buffer_size)
{
int err = 0;
@@ -133,146 +78,47 @@ int nvgpu_gsp_bootstrap(struct gk20a *g)
if ((g->ops.gsp.gsp_get_queue_head != NULL) &&
(g->ops.gsp.gsp_get_queue_tail != NULL)) {
err = nvgpu_falcon_dbg_buf_init(
g->gsp->gsp_flcn, GSP_DMESG_BUFFER_SIZE,
&g->gsp_flcn, buffer_size,
g->ops.gsp.gsp_get_queue_head(GSP_DEBUG_BUFFER_QUEUE),
g->ops.gsp.gsp_get_queue_head(queue_no),
g->ops.gsp.gsp_get_queue_tail(GSP_DEBUG_BUFFER_QUEUE));
g->ops.gsp.gsp_get_queue_tail(queue_no));
if (err != 0) {
nvgpu_err(g, "GSP debug init failed");
goto de_init;
}
}
#endif
err = gsp_bootstrap_ns(g, g->gsp);
if (err != 0) {
nvgpu_err(g, "GSP bootstrap failed");
goto de_init;
}
return err;
de_init:
nvgpu_gsp_sw_deinit(g);
return err;
}
void nvgpu_gsp_isr_mutex_aquire(struct gk20a *g)
void nvgpu_gsp_isr_mutex_acquire(struct gk20a *g, struct nvgpu_gsp *gsp)
{
struct nvgpu_gsp *gsp = g->gsp;
nvgpu_mutex_acquire(&gsp->isr_mutex);
}
void nvgpu_gsp_isr_mutex_release(struct gk20a *g)
void nvgpu_gsp_isr_mutex_release(struct gk20a *g, struct nvgpu_gsp *gsp)
{
struct nvgpu_gsp *gsp = g->gsp;
nvgpu_mutex_release(&gsp->isr_mutex);
}
bool nvgpu_gsp_is_isr_enable(struct gk20a *g)
bool nvgpu_gsp_is_isr_enable(struct gk20a *g, struct nvgpu_gsp *gsp)
{
struct nvgpu_gsp *gsp = g->gsp;
return gsp->isr_enabled;
}
struct nvgpu_falcon *nvgpu_gsp_falcon_instance(struct gk20a *g)
{
struct nvgpu_gsp *gsp = g->gsp;
return &g->gsp_flcn;
return gsp->gsp_flcn;
}
void nvgpu_gsp_isr(struct gk20a *g)
{
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
int nvgpu_gsp_stress_test_bootstrap(struct gk20a *g, bool start)
if (nvgpu_gsp_get_stress_test_load(g)) {
{
nvgpu_gsp_stest_isr(g);
int err = 0;
return;
struct nvgpu_gsp *gsp;
nvgpu_log_fn(g, " ");
gsp = g->gsp;
if (gsp == NULL) {
nvgpu_err(g, "GSP not initialized");
err = -EFAULT;
goto exit;
}
if (!start && !(gsp->gsp_test.load_stress_test))
return err;
if (start) {
err = nvgpu_dma_alloc_flags_sys(g,
NVGPU_DMA_PHYSICALLY_ADDRESSED,
SZ_64K,
&g->gsp->gsp_test.gsp_test_sysmem_block);
if (err != 0) {
nvgpu_err(g, "GSP test memory alloc failed");
goto exit;
}
}
gsp->gsp_test.load_stress_test = true;
err = nvgpu_gsp_bootstrap(g);
if (err != 0) {
nvgpu_err(g, "GSP bootstrap failed for stress test");
goto exit;
}
if (gsp->gsp_test.enable_stress_test) {
nvgpu_info(g, "Restarting GSP stress test");
nvgpu_falcon_mailbox_write(gsp->gsp_flcn, FALCON_MAILBOX_1, 0xFFFFFFFF);
}
return err;
exit:
gsp->gsp_test.load_stress_test = false;
return err;
}
int nvgpu_gsp_stress_test_halt(struct gk20a *g, bool restart)
{
int err = 0;
struct nvgpu_gsp *gsp;
nvgpu_log_fn(g, " ");
gsp = g->gsp;
if ((gsp == NULL)) {
nvgpu_info(g, "GSP not initialized");
goto exit;
}
if (restart && (gsp->gsp_test.load_stress_test == false)) {
nvgpu_info(g, "GSP stress test not loaded ");
goto exit;
}
err = nvgpu_falcon_reset(gsp->gsp_flcn);
if (err != 0) {
nvgpu_err(g, "gsp reset failed err=%d", err);
goto exit;
}
if (!restart) {
gsp->gsp_test.load_stress_test = false;
nvgpu_dma_free(g, &g->gsp->gsp_test.gsp_test_sysmem_block);
}
exit:
return err;
}
bool nvgpu_gsp_is_stress_test(struct gk20a *g)
{
if (g->gsp->gsp_test.load_stress_test)
return true;
else
return false;
}
#endif
#ifdef CONFIG_NVGPU_GSP_SCHEDULER
nvgpu_gsp_sched_isr(g);
#endif
return;
}
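
Because the scheduler and the stress test each own a struct nvgpu_gsp built around the same falcon (&g->gsp_flcn), the helpers above now take the instance explicitly instead of reaching through a global g->gsp. Suspend follows the same ownership chain; a simplified sketch of the path as wired in this change (parameter name shortened for the sketch):

/* nvgpu_prepare_poweroff() */
nvgpu_gsp_sched_suspend(g, g->gsp_sched);

/* common/gsp_scheduler/gsp_scheduler.c */
void nvgpu_gsp_sched_suspend(struct gk20a *g, struct nvgpu_gsp_sched *sched)
{
	sched->gsp_ready = false;          /* force INIT handshake on resume */
	nvgpu_gsp_suspend(g, sched->gsp);  /* masks the GSP interrupt */
}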


@@ -1,114 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GSP Test Functions
*
* Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <nvgpu/firmware.h>
#include <nvgpu/falcon.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/log.h>
#include <nvgpu/gsp.h>
#include <nvgpu/gsp/gsp_test.h>
#include "gsp_priv.h"
#include "gsp_bootstrap.h"
u32 nvgpu_gsp_get_current_iteration(struct gk20a *g)
{
u32 data = 0;
struct nvgpu_gsp *gsp = g->gsp;
nvgpu_log_fn(g, " ");
data = nvgpu_falcon_mailbox_read(gsp->gsp_flcn, FALCON_MAILBOX_1);
return data;
}
u32 nvgpu_gsp_get_current_test(struct gk20a *g)
{
u32 data = 0;
struct nvgpu_gsp *gsp = g->gsp;
nvgpu_log_fn(g, " ");
data = nvgpu_falcon_mailbox_read(gsp->gsp_flcn, FALCON_MAILBOX_0);
return data;
}
bool nvgpu_gsp_get_test_fail_status(struct gk20a *g)
{
struct nvgpu_gsp *gsp = g->gsp;
return gsp->gsp_test.stress_test_fail_status;
}
bool nvgpu_gsp_get_stress_test_start(struct gk20a *g)
{
struct nvgpu_gsp *gsp = g->gsp;
return gsp->gsp_test.enable_stress_test;
}
bool nvgpu_gsp_get_stress_test_load(struct gk20a *g)
{
struct nvgpu_gsp *gsp = g->gsp;
if (gsp == NULL)
return false;
return gsp->gsp_test.load_stress_test;
}
void nvgpu_gsp_set_test_fail_status(struct gk20a *g, bool val)
{
struct nvgpu_gsp *gsp = g->gsp;
gsp->gsp_test.stress_test_fail_status = val;
}
int nvgpu_gsp_set_stress_test_start(struct gk20a *g, bool flag)
{
int err = 0;
struct nvgpu_gsp *gsp = g->gsp;
nvgpu_log_fn(g, " ");
if (flag) {
nvgpu_info(g, "Enabling GSP test");
nvgpu_falcon_mailbox_write(gsp->gsp_flcn, FALCON_MAILBOX_1, 0xFFFFFFFF);
} else {
nvgpu_info(g, "Halting GSP test");
nvgpu_gsp_stress_test_halt(g, false);
}
gsp->gsp_test.enable_stress_test = flag;
return err;
}
int nvgpu_gsp_set_stress_test_load(struct gk20a *g, bool flag)
{
int err = 0;
nvgpu_log_fn(g, " ");
if (flag)
err = nvgpu_gsp_stress_test_bootstrap(g, flag);
return err;
}


@@ -0,0 +1,184 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/gk20a.h>
#include <nvgpu/string.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/errno.h>
#include <nvgpu/gsp.h>
#include <nvgpu/gsp_sched.h>
#include "gsp_scheduler.h"
#include "ipc/gsp_seq.h"
#include "ipc/gsp_queue.h"
static void gsp_sched_get_file_names(struct gk20a *g, struct gsp_fw *gsp_ucode)
{
nvgpu_log_fn(g, " ");
gsp_ucode->code_name = GSP_DBG_RISCV_FW_CODE;
gsp_ucode->data_name = GSP_DBG_RISCV_FW_DATA;
gsp_ucode->manifest_name = GSP_DBG_RISCV_FW_MANIFEST;
}
void nvgpu_gsp_sched_suspend(struct gk20a *g, struct nvgpu_gsp_sched *gsp_sched)
{
struct nvgpu_gsp *gsp = gsp_sched->gsp;
nvgpu_log_fn(g, " ");
if (gsp == NULL) {
nvgpu_info(g, "GSP not initialized");
return;
}
gsp_sched->gsp_ready = false;
nvgpu_gsp_suspend(g, gsp);
}
static void gsp_sched_deinit(struct gk20a *g, struct nvgpu_gsp_sched *gsp_sched)
{
gsp_sched->gsp_ready = false;
nvgpu_kfree(g, gsp_sched);
gsp_sched = NULL;
}
void nvgpu_gsp_sched_sw_deinit(struct gk20a *g)
{
struct nvgpu_gsp_sched *gsp_sched = g->gsp_sched;
nvgpu_log_fn(g, " ");
if (gsp_sched == NULL) {
return;
}
if (gsp_sched->gsp != NULL) {
nvgpu_gsp_sw_deinit(g, gsp_sched->gsp);
}
if (gsp_sched->sequences != NULL) {
nvgpu_gsp_sequences_free(g, gsp_sched->sequences);
}
if (gsp_sched->queues != NULL) {
nvgpu_gsp_queues_free(g, gsp_sched->queues);
}
if (gsp_sched != NULL) {
gsp_sched_deinit(g, gsp_sched);
}
}
int nvgpu_gsp_sched_sw_init(struct gk20a *g)
{
int err = 0;
struct nvgpu_gsp_sched *gsp_sched;
struct nvgpu_gsp *gsp;
nvgpu_log_fn(g, " ");
if (g->gsp_sched != NULL) {
/*
* Recovery/unrailgate case, we do not need to do gsp_sched init as
* gsp_sched is set during cold boot & doesn't execute gsp_sched clean
* up as part of power off sequence, so reuse to perform faster boot.
*/
return err;
}
/* Init struct holding the gsp sched software state */
g->gsp_sched = (struct nvgpu_gsp_sched *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_gsp_sched));
if (g->gsp_sched == NULL) {
err = -ENOMEM;
goto de_init;
}
/* Init struct holding the gsp software state */
g->gsp_sched->gsp = (struct nvgpu_gsp *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_gsp));
if (g->gsp_sched->gsp == NULL) {
err = -ENOMEM;
goto de_init;
}
gsp_sched = g->gsp_sched;
gsp = g->gsp_sched->gsp;
/* gsp falcon software state */
gsp->gsp_flcn = &g->gsp_flcn;
gsp->g = g;
/* Init isr mutex */
nvgpu_mutex_init(&gsp->isr_mutex);
err = nvgpu_gsp_sequences_init(g, gsp_sched);
if (err != 0) {
nvgpu_err(g, "GSP sequences init failed");
goto de_init;
}
nvgpu_log_fn(g, " Done ");
return err;
de_init:
nvgpu_gsp_sched_sw_deinit(g);
return err;
}
int nvgpu_gsp_sched_bootstrap_ns(struct gk20a *g)
{
struct nvgpu_gsp_sched *gsp_sched = g->gsp_sched;
int status = 0;
#ifdef CONFIG_NVGPU_FALCON_DEBUG
status = nvgpu_gsp_debug_buf_init(g, GSP_SCHED_DEBUG_BUFFER_QUEUE,
GSP_SCHED_DMESG_BUFFER_SIZE);
if (status != 0) {
nvgpu_err(g, "GSP sched debug buf init failed");
goto de_init;
}
#endif
/* Get ucode file names */
gsp_sched_get_file_names(g, &gsp_sched->gsp->gsp_ucode);
status = nvgpu_gsp_bootstrap_ns(g, gsp_sched->gsp);
if (status != 0) {
nvgpu_err(g, " GSP sched bootstrap failed ");
goto de_init;
}
return status;
de_init:
nvgpu_gsp_sched_sw_deinit(g);
return status;
}
void nvgpu_gsp_sched_isr(struct gk20a *g)
{
struct nvgpu_gsp *gsp = g->gsp_sched->gsp;
g->ops.gsp.gsp_isr(g, gsp);
}
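
Taken together with the nvgpu_init changes later in this diff, cold boot now runs one init-table entry per unit instead of a single combined gsp init. A condensed view of the resulting order (hypothetical wrapper name; the real calls are made from the NVGPU_INIT_TABLE_ENTRY table and the bootstrap path):

static int gsp_units_poweron(struct gk20a *g)
{
	int err;

	/* allocate g->gsp_sched + embedded nvgpu_gsp, init sequences */
	err = nvgpu_gsp_sched_sw_init(g);
	if (err != 0) {
		return err;
	}

	/* debug buf init, pick the sample-gsp ucode, bootstrap the core */
	return nvgpu_gsp_sched_bootstrap_ns(g);
}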


@@ -0,0 +1,49 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef GSP_SCHEDULER_H
#define GSP_SCHEDULER_H
#define GSP_SCHED_DEBUG_BUFFER_QUEUE 3U
#define GSP_SCHED_DMESG_BUFFER_SIZE 0x1000U
#define GSP_QUEUE_NUM 2U
#define GSP_DBG_RISCV_FW_MANIFEST "sample-gsp.manifest.encrypt.bin.out.bin"
#define GSP_DBG_RISCV_FW_CODE "sample-gsp.text.encrypt.bin"
#define GSP_DBG_RISCV_FW_DATA "sample-gsp.data.encrypt.bin"
/* GSP descriptor's */
struct nvgpu_gsp_sched {
struct nvgpu_gsp *gsp;
struct gsp_sequences *sequences;
struct nvgpu_engine_mem_queue *queues[GSP_QUEUE_NUM];
u32 command_ack;
/* set to true once init received */
bool gsp_ready;
};
#endif /* GSP_SCHEDULER_H */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -25,7 +25,7 @@
#include <nvgpu/log.h>
#include <nvgpu/gsp.h>
#include "../gsp_priv.h"
#include "../gsp_scheduler.h"
#include "gsp_seq.h"
#include "gsp_queue.h"
#include "gsp_cmd.h"
@@ -35,10 +35,10 @@ u8 gsp_unit_id_is_valid(u8 id)
return (id < NV_GSP_UNIT_END);
}
static bool gsp_validate_cmd(struct nvgpu_gsp *gsp,
static bool gsp_validate_cmd(struct nvgpu_gsp_sched *gsp_sched,
struct nv_flcn_cmd_gsp *cmd, u32 queue_id)
{
struct gk20a *g = gsp->g;
struct gk20a *g = gsp_sched->gsp->g;
u32 queue_size;
if (queue_id != GSP_NV_CMDQ_LOG_ID) {
@@ -49,7 +49,7 @@ static bool gsp_validate_cmd(struct nvgpu_gsp *gsp,
goto invalid_cmd;
}
queue_size = nvgpu_gsp_queue_get_size(gsp->queues, queue_id);
queue_size = nvgpu_gsp_queue_get_size(gsp_sched->queues, queue_id);
if (cmd->hdr.size > (queue_size >> 1)) {
goto invalid_cmd;
@@ -69,12 +69,13 @@ invalid_cmd:
return false;
}
static int gsp_write_cmd(struct nvgpu_gsp *gsp,
static int gsp_write_cmd(struct nvgpu_gsp_sched *gsp_sched,
struct nv_flcn_cmd_gsp *cmd, u32 queue_id,
u32 timeout_ms)
{
struct nvgpu_timeout timeout;
struct gk20a *g = gsp->g;
struct gk20a *g = gsp_sched->gsp->g;
struct nvgpu_gsp *gsp = gsp_sched->gsp;
int err;
nvgpu_log_fn(g, " ");
@@ -82,7 +83,7 @@ static int gsp_write_cmd(struct nvgpu_gsp *gsp,
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
do {
err = nvgpu_gsp_queue_push(gsp->queues, queue_id, gsp->gsp_flcn,
err = nvgpu_gsp_queue_push(gsp_sched->queues, queue_id, gsp->gsp_flcn,
cmd, cmd->hdr.size);
if ((err == -EAGAIN) &&
(nvgpu_timeout_expired(&timeout) == 0)) {
@@ -103,7 +104,7 @@ int nvgpu_gsp_cmd_post(struct gk20a *g, struct nv_flcn_cmd_gsp *cmd,
u32 queue_id, gsp_callback callback,
void *cb_param, u32 timeout)
{
struct nvgpu_gsp *gsp = g->gsp;
struct nvgpu_gsp_sched *gsp_sched = g->gsp_sched;
struct gsp_sequence *seq = NULL;
int err = 0;
@@ -114,13 +115,13 @@ int nvgpu_gsp_cmd_post(struct gk20a *g, struct nv_flcn_cmd_gsp *cmd,
}
/* Sanity check the command input. */
if (!gsp_validate_cmd(gsp, cmd, queue_id)) {
if (!gsp_validate_cmd(gsp_sched, cmd, queue_id)) {
err = -EINVAL;
goto exit;
}
/* Attempt to reserve a sequence for this command. */
err = nvgpu_gsp_seq_acquire(g, gsp->sequences, &seq,
err = nvgpu_gsp_seq_acquire(g, gsp_sched->sequences, &seq,
callback, cb_param);
if (err != 0) {
goto exit;
@@ -134,9 +135,9 @@ int nvgpu_gsp_cmd_post(struct gk20a *g, struct nv_flcn_cmd_gsp *cmd,
nvgpu_gsp_seq_set_state(seq, GSP_SEQ_STATE_USED);
err = gsp_write_cmd(gsp, cmd, queue_id, timeout);
err = gsp_write_cmd(gsp_sched, cmd, queue_id, timeout);
if (err != 0) {
gsp_seq_release(gsp->sequences, seq);
gsp_seq_release(gsp_sched->sequences, seq);
}
exit:


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,22 +24,22 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/gsp.h>
#include "../gsp_priv.h"
#include "../gsp_scheduler.h"
#include "gsp_queue.h"
#include "gsp_msg.h"
#include "gsp_seq.h"
/* Message/Event request handlers */
static int gsp_response_handle(struct nvgpu_gsp *gsp,
static int gsp_response_handle(struct nvgpu_gsp_sched *gsp_sched,
struct nv_flcn_msg_gsp *msg)
{
struct gk20a *g = gsp->g;
struct gk20a *g = gsp_sched->gsp->g;
return nvgpu_gsp_seq_response_handle(g, gsp->sequences,
return nvgpu_gsp_seq_response_handle(g, gsp_sched->sequences,
msg, msg->hdr.seq_id);
}
static int gsp_handle_event(struct nvgpu_gsp *gsp,
static int gsp_handle_event(struct nvgpu_gsp_sched *gsp_sched,
struct nv_flcn_msg_gsp *msg)
{
int err = 0;
@@ -52,29 +52,29 @@ static int gsp_handle_event(struct nvgpu_gsp *gsp,
return err;
}
static bool gsp_read_message(struct nvgpu_gsp *gsp,
static bool gsp_read_message(struct nvgpu_gsp_sched *gsp_sched,
u32 queue_id, struct nv_flcn_msg_gsp *msg, int *status)
{
struct gk20a *g = gsp->g;
struct gk20a *g = gsp_sched->gsp->g;
u32 read_size;
int err;
*status = 0U;
if (nvgpu_gsp_queue_is_empty(gsp->queues, queue_id)) {
if (nvgpu_gsp_queue_is_empty(gsp_sched->queues, queue_id)) {
return false;
}
if (!nvgpu_gsp_queue_read(g, gsp->queues, queue_id,
if (!nvgpu_gsp_queue_read(g, gsp_sched->queues, queue_id,
gsp->gsp_flcn, &msg->hdr,
gsp_sched->gsp->gsp_flcn, &msg->hdr,
GSP_MSG_HDR_SIZE, status)) {
nvgpu_err(g, "fail to read msg from queue %d", queue_id);
goto clean_up;
}
if (msg->hdr.unit_id == NV_GSP_UNIT_REWIND) {
err = nvgpu_gsp_queue_rewind(gsp->gsp_flcn,
err = nvgpu_gsp_queue_rewind(gsp_sched->gsp->gsp_flcn,
gsp->queues, queue_id);
gsp_sched->queues, queue_id);
if (err != 0) {
nvgpu_err(g, "fail to rewind queue %d", queue_id);
*status = err;
@@ -82,8 +82,8 @@ static bool gsp_read_message(struct nvgpu_gsp *gsp,
}
/* read again after rewind */
if (!nvgpu_gsp_queue_read(g, gsp->queues, queue_id,
if (!nvgpu_gsp_queue_read(g, gsp_sched->queues, queue_id,
gsp->gsp_flcn, &msg->hdr,
gsp_sched->gsp->gsp_flcn, &msg->hdr,
GSP_MSG_HDR_SIZE, status)) {
nvgpu_err(g, "fail to read msg from queue %d",
queue_id);
@@ -100,8 +100,8 @@ static bool gsp_read_message(struct nvgpu_gsp *gsp,
if (msg->hdr.size > GSP_MSG_HDR_SIZE) {
read_size = msg->hdr.size - GSP_MSG_HDR_SIZE;
if (!nvgpu_gsp_queue_read(g, gsp->queues, queue_id,
if (!nvgpu_gsp_queue_read(g, gsp_sched->queues, queue_id,
gsp->gsp_flcn, &msg->msg,
gsp_sched->gsp->gsp_flcn, &msg->msg,
read_size, status)) {
nvgpu_err(g, "fail to read msg from queue %d",
queue_id);
@@ -115,17 +115,18 @@ clean_up:
return false;
}
static int gsp_process_init_msg(struct nvgpu_gsp *gsp,
static int gsp_process_init_msg(struct nvgpu_gsp_sched *gsp_sched,
struct nv_flcn_msg_gsp *msg)
{
struct gk20a *g = gsp->g;
struct gk20a *g = gsp_sched->gsp->g;
struct nvgpu_gsp *gsp = gsp_sched->gsp;
struct gsp_init_msg_gsp_init *gsp_init;
u32 tail = 0;
int err = 0;
g->ops.gsp.msgq_tail(g, gsp, &tail, QUEUE_GET);
err = nvgpu_falcon_copy_from_emem(gsp->gsp_flcn, tail,
err = nvgpu_falcon_copy_from_emem(gsp_sched->gsp->gsp_flcn, tail,
(u8 *)&msg->hdr, GSP_MSG_HDR_SIZE, 0U);
if (err != 0) {
goto exit;
@@ -137,7 +138,7 @@ static int gsp_process_init_msg(struct nvgpu_gsp *gsp,
goto exit;
}
err = nvgpu_falcon_copy_from_emem(gsp->gsp_flcn, tail + GSP_MSG_HDR_SIZE,
err = nvgpu_falcon_copy_from_emem(gsp_sched->gsp->gsp_flcn, tail + GSP_MSG_HDR_SIZE,
(u8 *)&msg->msg, msg->hdr.size - GSP_MSG_HDR_SIZE, 0U);
if (err != 0) {
goto exit;
@@ -154,12 +155,12 @@ static int gsp_process_init_msg(struct nvgpu_gsp *gsp,
gsp_init = &msg->msg.init.gsp_init;
err = nvgpu_gsp_queues_init(g, gsp->queues, gsp_init);
err = nvgpu_gsp_queues_init(g, gsp_sched->queues, gsp_init);
if (err != 0) {
return err;
}
gsp->gsp_ready = true;
gsp_sched->gsp_ready = true;
exit:
return err;
@@ -167,20 +168,20 @@ exit:
int nvgpu_gsp_process_message(struct gk20a *g)
{
struct nvgpu_gsp *gsp = g->gsp;
struct nvgpu_gsp_sched *gsp_sched = g->gsp_sched;
struct nv_flcn_msg_gsp msg;
bool read_msg;
int status = 0;
nvgpu_log_fn(g, " ");
if (unlikely(!gsp->gsp_ready)) {
if (unlikely(!gsp_sched->gsp_ready)) {
status = gsp_process_init_msg(gsp, &msg);
status = gsp_process_init_msg(gsp_sched, &msg);
goto exit;
}
do {
read_msg = gsp_read_message(gsp,
read_msg = gsp_read_message(gsp_sched,
GSP_NV_MSGQ_LOG_ID, &msg, &status);
if (read_msg == false) {
break;
@@ -195,12 +196,12 @@ int nvgpu_gsp_process_message(struct gk20a *g)
msg.hdr.ctrl_flags &= ~GSP_CMD_FLAGS_MASK;
if (msg.hdr.ctrl_flags == GSP_CMD_FLAGS_EVENT) {
gsp_handle_event(gsp, &msg);
gsp_handle_event(gsp_sched, &msg);
} else {
gsp_response_handle(gsp, &msg);
gsp_response_handle(gsp_sched, &msg);
}
if (!nvgpu_gsp_queue_is_empty(gsp->queues,
if (!nvgpu_gsp_queue_is_empty(gsp_sched->queues,
GSP_NV_MSGQ_LOG_ID)) {
g->ops.gsp.set_msg_intr(g);
}
@@ -210,10 +211,9 @@ exit:
return status;
}
int nvgpu_gsp_wait_message_cond(struct nvgpu_gsp *gsp, u32 timeout_ms,
int nvgpu_gsp_wait_message_cond(struct gk20a *g, u32 timeout_ms,
void *var, u8 val)
{
struct gk20a *g = gsp->g;
struct nvgpu_timeout timeout;
u32 delay = POLL_DELAY_MIN_US;
@@ -224,8 +224,6 @@ int nvgpu_gsp_wait_message_cond(struct nvgpu_gsp *gsp, u32 timeout_ms,
return 0;
}
g->ops.gsp.gsp_isr(g);
nvgpu_usleep_range(delay, delay * 2U);
delay = min_t(u32, delay << 1U, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
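
Note that nvgpu_gsp_wait_message_cond() now takes the gk20a pointer directly and no longer kicks g->ops.gsp.gsp_isr() by hand while polling; the flag it watches is expected to be flipped from interrupt context by nvgpu_gsp_process_message(). A typical (hypothetical) wait for the scheduler's INIT handshake would look like:

/* after bootstrap: block until the INIT message marks the queues ready */
err = nvgpu_gsp_wait_message_cond(g, GSP_WAIT_TIME_MS,
		&g->gsp_sched->gsp_ready, (u8)true);
if (err != 0) {
	nvgpu_err(g, "timed out waiting for GSP INIT message");
}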


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,7 @@
#include <nvgpu/gsp.h>
#include "gsp_cmd.h"
#include "../gsp_scheduler.h"
struct nvgpu_gsp;
@@ -68,7 +69,7 @@ struct nv_flcn_msg_gsp {
} msg;
};
int nvgpu_gsp_wait_message_cond(struct nvgpu_gsp *gsp, u32 timeout_ms,
int nvgpu_gsp_wait_message_cond(struct gk20a *g, u32 timeout_ms,
void *var, u8 val);
#endif /* NVGPU_GSP_MSG_H */


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -26,7 +26,7 @@
#include <nvgpu/log.h>
#include <nvgpu/gsp.h>
#include "../gsp_priv.h"
#include "../gsp_scheduler.h"
#include "gsp_queue.h"
#include "gsp_msg.h"


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -26,7 +26,7 @@
#include <nvgpu/errno.h>
#include <nvgpu/gsp.h>
#include "../gsp_priv.h"
#include "../gsp_scheduler.h"
#include "gsp_seq.h"
static void gsp_sequences_init(struct gk20a *g,
@@ -47,7 +47,7 @@ static void gsp_sequences_init(struct gk20a *g,
}
}
int nvgpu_gsp_sequences_init(struct gk20a *g, struct nvgpu_gsp *gsp)
int nvgpu_gsp_sequences_init(struct gk20a *g, struct nvgpu_gsp_sched *gsp_sched)
{
int err = 0;
struct gsp_sequences *seqs;
@@ -68,8 +68,8 @@ int nvgpu_gsp_sequences_init(struct gk20a *g, struct nvgpu_gsp *gsp)
return -ENOMEM;
}
gsp->sequences = seqs;
gsp_sched->sequences = seqs;
gsp->sequences->seq = seqs->seq;
gsp_sched->sequences->seq = seqs->seq;
nvgpu_mutex_init(&seqs->gsp_seq_lock);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -56,7 +56,7 @@ struct gsp_sequences {
struct nvgpu_mutex gsp_seq_lock;
};
int nvgpu_gsp_sequences_init(struct gk20a *g, struct nvgpu_gsp *gsp);
int nvgpu_gsp_sequences_init(struct gk20a *g, struct nvgpu_gsp_sched *gsp_sched);
void nvgpu_gsp_sequences_free(struct gk20a *g,
struct gsp_sequences *sequences);
int nvgpu_gsp_seq_acquire(struct gk20a *g,


@@ -0,0 +1,343 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* GSP Test Functions
*
* Copyright (c) 2021-2022, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <nvgpu/firmware.h>
#include <nvgpu/falcon.h>
#include <nvgpu/dma.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/log.h>
#include <nvgpu/gsp.h>
#include <nvgpu/gsp/gsp_test.h>
#include "gsp_test.h"
u32 nvgpu_gsp_get_current_iteration(struct gk20a *g)
{
u32 data = 0;
struct nvgpu_gsp *gsp = g->gsp_stest->gsp;
nvgpu_log_fn(g, " ");
data = nvgpu_falcon_mailbox_read(gsp->gsp_flcn, FALCON_MAILBOX_1);
return data;
}
u32 nvgpu_gsp_get_current_test(struct gk20a *g)
{
u32 data = 0;
struct nvgpu_gsp *gsp = g->gsp_stest->gsp;
nvgpu_log_fn(g, " ");
data = nvgpu_falcon_mailbox_read(gsp->gsp_flcn, FALCON_MAILBOX_0);
return data;
}
bool nvgpu_gsp_get_test_fail_status(struct gk20a *g)
{
struct nvgpu_gsp_test *gsp_stest = g->gsp_stest;
return gsp_stest->gsp_test.stress_test_fail_status;
}
bool nvgpu_gsp_get_stress_test_start(struct gk20a *g)
{
struct nvgpu_gsp_test *gsp_stest = g->gsp_stest;
return gsp_stest->gsp_test.enable_stress_test;
}
bool nvgpu_gsp_get_stress_test_load(struct gk20a *g)
{
struct nvgpu_gsp_test *gsp_stest = g->gsp_stest;
if (gsp_stest == NULL)
return false;
return gsp_stest->gsp_test.load_stress_test;
}
void nvgpu_gsp_set_test_fail_status(struct gk20a *g, bool val)
{
struct nvgpu_gsp_test *gsp_stest = g->gsp_stest;
gsp_stest->gsp_test.stress_test_fail_status = val;
}
int nvgpu_gsp_set_stress_test_start(struct gk20a *g, bool flag)
{
int err = 0;
struct nvgpu_gsp *gsp = g->gsp_stest->gsp;
struct nvgpu_gsp_test *gsp_stest = g->gsp_stest;
nvgpu_log_fn(g, " ");
if (flag) {
nvgpu_info(g, "Enabling GSP test");
nvgpu_falcon_mailbox_write(gsp->gsp_flcn, FALCON_MAILBOX_1, 0xFFFFFFFF);
} else {
nvgpu_info(g, "Halting GSP test");
nvgpu_gsp_stress_test_halt(g, false);
}
gsp_stest->gsp_test.enable_stress_test = flag;
return err;
}
int nvgpu_gsp_set_stress_test_load(struct gk20a *g, bool flag)
{
int err = 0;
nvgpu_log_fn(g, " ");
if (flag)
err = nvgpu_gsp_stress_test_bootstrap(g, flag);
return err;
}
static void gsp_test_get_file_names(struct gk20a *g, struct gsp_fw *gsp_ucode)
{
/*
* TODO Switch to GSP specific register
*/
if (g->ops.pmu.is_debug_mode_enabled(g)) {
gsp_ucode->code_name = GSPDBG_RISCV_STRESS_TEST_FW_CODE;
gsp_ucode->data_name = GSPDBG_RISCV_STRESS_TEST_FW_DATA;
gsp_ucode->manifest_name = GSPDBG_RISCV_STRESS_TEST_FW_MANIFEST;
} else {
gsp_ucode->code_name = GSPPROD_RISCV_STRESS_TEST_FW_CODE;
gsp_ucode->data_name = GSPPROD_RISCV_STRESS_TEST_FW_DATA;
gsp_ucode->manifest_name = GSPPROD_RISCV_STRESS_TEST_FW_MANIFEST;
}
}
void nvgpu_gsp_write_test_sysmem_addr(struct gk20a *g)
{
struct nvgpu_gsp *gsp = g->gsp_stest->gsp;
struct nvgpu_falcon *flcn;
u64 sysmem_addr;
struct nvgpu_gsp_test *gsp_stest;
flcn = gsp->gsp_flcn;
gsp_stest = g->gsp_stest;
sysmem_addr = nvgpu_mem_get_addr(g, &gsp_stest->gsp_test.gsp_test_sysmem_block);
nvgpu_falcon_mailbox_write(flcn, FALCON_MAILBOX_0, u64_lo32(sysmem_addr));
nvgpu_falcon_mailbox_write(flcn, FALCON_MAILBOX_1, u64_hi32(sysmem_addr));
}
int nvgpu_gsp_stress_test_bootstrap(struct gk20a *g, bool start)
{
int err = 0;
struct nvgpu_gsp *gsp = g->gsp_stest->gsp;
struct nvgpu_gsp_test *gsp_stest = g->gsp_stest;
nvgpu_log_fn(g, " ");
if (gsp_stest == NULL) {
nvgpu_err(g, "GSP not initialized");
err = -EFAULT;
goto exit;
}
if (!start && !(gsp_stest->gsp_test.load_stress_test))
return err;
if (start) {
err = nvgpu_dma_alloc_flags_sys(g,
NVGPU_DMA_PHYSICALLY_ADDRESSED,
SZ_64K,
&g->gsp_stest->gsp_test.gsp_test_sysmem_block);
if (err != 0) {
nvgpu_err(g, "GSP test memory alloc failed");
goto exit;
}
}
gsp_stest->gsp_test.load_stress_test = true;
#ifdef CONFIG_NVGPU_FALCON_DEBUG
err = nvgpu_gsp_debug_buf_init(g, GSP_TEST_DEBUG_BUFFER_QUEUE,
GSP_TEST_DMESG_BUFFER_SIZE);
if (err != 0) {
nvgpu_err(g, "GSP sched debug buf init failed");
goto exit;
}
#endif
gsp_test_get_file_names(g, &gsp->gsp_ucode);
err = nvgpu_gsp_bootstrap_ns(g, gsp);
if (err != 0) {
nvgpu_err(g, "GSP bootstrap failed for stress test");
goto exit;
}
/* wait for mailbox-0 update with non-zero value */
err = nvgpu_gsp_wait_for_mailbox_update(gsp, 0x0,
GSP_STRESS_TEST_MAILBOX_PASS, GSP_WAIT_TIME_MS);
if (err != 0) {
nvgpu_err(g, "gsp ucode failed to update mailbox-0");
}
if (gsp_stest->gsp_test.enable_stress_test) {
nvgpu_info(g, "Restarting GSP stress test");
nvgpu_falcon_mailbox_write(gsp->gsp_flcn, FALCON_MAILBOX_1, 0xFFFFFFFF);
}
return err;
exit:
gsp_stest->gsp_test.load_stress_test = false;
return err;
}
int nvgpu_gsp_stress_test_halt(struct gk20a *g, bool restart)
{
int err = 0;
struct nvgpu_gsp *gsp = g->gsp_stest->gsp;
struct nvgpu_gsp_test *gsp_stest = g->gsp_stest;
nvgpu_log_fn(g, " ");
if (gsp == NULL) {
nvgpu_info(g, "GSP not initialized");
goto exit;
}
nvgpu_gsp_suspend(g, gsp);
if (restart && (gsp_stest->gsp_test.load_stress_test == false)) {
nvgpu_info(g, "GSP stress test not loaded ");
goto exit;
}
err = nvgpu_falcon_reset(gsp->gsp_flcn);
if (err != 0) {
nvgpu_err(g, "gsp reset failed err=%d", err);
goto exit;
}
if (!restart) {
gsp_stest->gsp_test.load_stress_test = false;
nvgpu_dma_free(g, &gsp_stest->gsp_test.gsp_test_sysmem_block);
}
exit:
return err;
}
bool nvgpu_gsp_is_stress_test(struct gk20a *g)
{
if (g->gsp_stest->gsp_test.load_stress_test)
return true;
else
return false;
}
static void gsp_test_sw_deinit(struct gk20a *g, struct nvgpu_gsp_test *gsp_stest)
{
nvgpu_dma_free(g, &gsp_stest->gsp_test.gsp_test_sysmem_block);
nvgpu_kfree(g, gsp_stest);
gsp_stest = NULL;
}
void nvgpu_gsp_test_sw_deinit(struct gk20a *g)
{
struct nvgpu_gsp_test *gsp_stest = g->gsp_stest;
nvgpu_log_fn(g, " ");
if (gsp_stest == NULL) {
nvgpu_info(g, "GSP stest not initialized");
return;
}
if (gsp_stest->gsp != NULL) {
nvgpu_gsp_sw_deinit(g, gsp_stest->gsp);
}
if (gsp_stest != NULL) {
gsp_test_sw_deinit(g, gsp_stest);
}
}
int nvgpu_gsp_stress_test_sw_init(struct gk20a *g)
{
int err = 0;
struct nvgpu_gsp_test *gsp_stest;
struct nvgpu_gsp *gsp;
nvgpu_log_fn(g, " ");
if (g->gsp_stest != NULL) {
/*
* Recovery/unrailgate case, we do not need to do gsp_stest init as
* gsp_stest is set during cold boot & doesn't execute gsp_stest clean
* up as part of power off sequence, so reuse to perform faster boot.
*/
nvgpu_gsp_stress_test_bootstrap(g, false);
return err;
}
/* Init struct holding the gsp_stest software state */
g->gsp_stest = (struct nvgpu_gsp_test *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_gsp_test));
if (g->gsp_stest == NULL) {
err = -ENOMEM;
goto de_init;
}
/* Init struct holding the gsp software state */
g->gsp_stest->gsp = (struct nvgpu_gsp *)
nvgpu_kzalloc(g, sizeof(struct nvgpu_gsp));
if (g->gsp_stest->gsp == NULL) {
err = -ENOMEM;
goto de_init;
}
gsp_stest = g->gsp_stest;
gsp = g->gsp_stest->gsp;
/* gsp falcon software state */
gsp->gsp_flcn = &g->gsp_flcn;
gsp->g = g;
/* Init isr mutex */
nvgpu_mutex_init(&gsp->isr_mutex);
nvgpu_log_fn(g, " Done ");
return err;
de_init:
nvgpu_gsp_test_sw_deinit(g);
return err;
}
void nvgpu_gsp_stest_isr(struct gk20a *g)
{
struct nvgpu_gsp *gsp = g->gsp_stest->gsp;
g->ops.gsp.gsp_isr(g, gsp);
}
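
The test unit now owns the whole stress-test lifecycle end to end. A condensed view of the load/halt flow built from the functions above (error handling elided; the calls are typically driven from debug controls, which is an assumption here):

/* load + start: allocate the 64K sysmem block, boot the gsp-stress ucode,
 * then wait for GSP_STRESS_TEST_MAILBOX_PASS in mailbox-0 */
nvgpu_gsp_set_stress_test_load(g, true);
nvgpu_gsp_set_stress_test_start(g, true);

/* stop: suspend + reset the falcon and free the sysmem block */
nvgpu_gsp_set_stress_test_start(g, false);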


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,25 +20,22 @@
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_GSP_PRIV
#ifndef GSP_TEST_H
#define NVGPU_GSP_PRIV
#define GSP_TEST_H
#include <nvgpu/lock.h>
#define GSP_TEST_DEBUG_BUFFER_QUEUE 3U
#include <nvgpu/nvgpu_mem.h>
#define GSP_TEST_DMESG_BUFFER_SIZE 0xC00U
#define GSP_DEBUG_BUFFER_QUEUE 3U
#define GSPDBG_RISCV_STRESS_TEST_FW_MANIFEST "gsp-stress.manifest.encrypt.bin.out.bin"
#define GSP_DMESG_BUFFER_SIZE 0xC00U
#define GSPDBG_RISCV_STRESS_TEST_FW_CODE "gsp-stress.text.encrypt.bin"
#define GSPDBG_RISCV_STRESS_TEST_FW_DATA "gsp-stress.data.encrypt.bin"
#define GSP_QUEUE_NUM 2U
#define GSPPROD_RISCV_STRESS_TEST_FW_MANIFEST "gsp-stress.manifest.encrypt.bin.out.bin.prod"
#define GSPPROD_RISCV_STRESS_TEST_FW_CODE "gsp-stress.text.encrypt.bin.prod"
#define GSPPROD_RISCV_STRESS_TEST_FW_DATA "gsp-stress.data.encrypt.bin.prod"
struct gsp_fw {
#define GSP_STRESS_TEST_MAILBOX_PASS 0xAAAAAAAA
/* gsp ucode */
struct nvgpu_firmware *code;
struct nvgpu_firmware *data;
struct nvgpu_firmware *manifest;
};
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
struct gsp_stress_test {
bool load_stress_test;
bool enable_stress_test;
@@ -47,28 +44,11 @@ struct gsp_stress_test {
u32 test_name;
struct nvgpu_mem gsp_test_sysmem_block;
};
#endif
/* GSP descriptor's */
struct nvgpu_gsp {
struct nvgpu_gsp_test {
struct gk20a *g;
struct nvgpu_gsp *gsp;
struct gsp_fw gsp_ucode;
struct nvgpu_falcon *gsp_flcn;
bool isr_enabled;
struct nvgpu_mutex isr_mutex;
struct gsp_sequences *sequences;
struct nvgpu_engine_mem_queue *queues[GSP_QUEUE_NUM];
u32 command_ack;
/* set to true once init received */
bool gsp_ready;
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
struct gsp_stress_test gsp_test;
#endif
};
#endif /* NVGPU_GSP_PRIV */
#endif /* GSP_TEST_H */


@@ -44,6 +44,10 @@
#include <nvgpu/power_features/cg.h>
#ifdef CONFIG_NVGPU_GSP_SCHEDULER
#include <nvgpu/gsp.h>
#include <nvgpu/gsp_sched.h>
#endif
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
#include <nvgpu/gsp/gsp_test.h>
#endif
#include <nvgpu/pm_reservation.h>
#include <nvgpu/netlist.h>
@@ -346,6 +350,7 @@ int nvgpu_prepare_poweroff(struct gk20a *g)
ret = tmp_ret;
}
#ifndef CONFIG_NVGPU_DGPU
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
tmp_ret = nvgpu_gsp_stress_test_halt(g, true);
if (tmp_ret != 0) {
@@ -353,8 +358,9 @@ int nvgpu_prepare_poweroff(struct gk20a *g)
nvgpu_err(g, "Failed to halt GSP stress test");
}
#endif
#if defined(CONFIG_NVGPU_GSP_SCHEDULER)
#ifdef CONFIG_NVGPU_GSP_SCHEDULER
nvgpu_gsp_suspend(g);
nvgpu_gsp_sched_suspend(g, g->gsp_sched);
#endif
#endif
nvgpu_falcons_sw_free(g);
@@ -948,9 +954,14 @@ int nvgpu_finalize_poweron(struct gk20a *g)
#endif
NVGPU_INIT_TABLE_ENTRY(g->ops.channel.resume_all_serviceable_ch,
NO_FLAG),
#if defined(CONFIG_NVGPU_GSP_SCHEDULER) || defined(CONFIG_NVGPU_GSP_STRESS_TEST)
#ifndef CONFIG_NVGPU_DGPU
#ifdef CONFIG_NVGPU_GSP_SCHEDULER
/* Init gsp ops */
NVGPU_INIT_TABLE_ENTRY(&nvgpu_gsp_sw_init, NO_FLAG),
NVGPU_INIT_TABLE_ENTRY(&nvgpu_gsp_sched_sw_init, NO_FLAG),
#endif
#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
NVGPU_INIT_TABLE_ENTRY(&nvgpu_gsp_stress_test_sw_init, NO_FLAG),
#endif
#endif
};
size_t i;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -177,7 +177,7 @@ void ga10b_gsp_handle_interrupts(struct gk20a *g, u32 intr)
 	}
 }
-void ga10b_gsp_isr(struct gk20a *g)
+void ga10b_gsp_isr(struct gk20a *g, struct nvgpu_gsp *gsp)
 {
 	u32 intr = 0U;
 	u32 mask = 0U;
@@ -189,8 +189,8 @@ void ga10b_gsp_isr(struct gk20a *g)
 		return;
 	}
-	nvgpu_gsp_isr_mutex_aquire(g);
-	if (!nvgpu_gsp_is_isr_enable(g)) {
+	nvgpu_gsp_isr_mutex_acquire(g, gsp);
+	if (!nvgpu_gsp_is_isr_enable(g, gsp)) {
 		goto exit;
 	}
@@ -212,69 +212,21 @@ void ga10b_gsp_isr(struct gk20a *g)
 	ga10b_gsp_handle_interrupts(g, intr);
 exit:
-	nvgpu_gsp_isr_mutex_release(g);
+	nvgpu_gsp_isr_mutex_release(g, gsp);
 }
-static void ga10b_riscv_set_irq(struct gk20a *g, bool set_irq,
-		u32 intr_mask, u32 intr_dest)
-{
-	if (set_irq) {
-		gk20a_writel(g, pgsp_riscv_irqmset_r(), intr_mask);
-		gk20a_writel(g, pgsp_riscv_irqdest_r(), intr_dest);
-	} else {
-		gk20a_writel(g, pgsp_riscv_irqmclr_r(), 0xffffffffU);
-	}
-}
 void ga10b_gsp_enable_irq(struct gk20a *g, bool enable)
 {
-	u32 intr_mask;
-	u32 intr_dest;
-	bool skip_priv = false;
 	nvgpu_log_fn(g, " ");
-#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
-	if (nvgpu_gsp_is_stress_test(g))
-		skip_priv = true;
-#endif
-	/* clear before setting required irq */
-	if ((!skip_priv) || (!enable))
-		ga10b_riscv_set_irq(g, false, 0x0, 0x0);
 	nvgpu_cic_mon_intr_stall_unit_config(g,
 		NVGPU_CIC_INTR_UNIT_GSP, NVGPU_CIC_INTR_DISABLE);
 	if (enable) {
-		if (!skip_priv) {
-			/* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
-			intr_dest = pgsp_riscv_irqdest_gptmr_f(0) |
-				pgsp_riscv_irqdest_wdtmr_f(1) |
-				pgsp_riscv_irqdest_mthd_f(0) |
-				pgsp_riscv_irqdest_ctxsw_f(0) |
-				pgsp_riscv_irqdest_halt_f(1) |
-				pgsp_riscv_irqdest_exterr_f(0) |
-				pgsp_riscv_irqdest_swgen0_f(1) |
-				pgsp_riscv_irqdest_swgen1_f(1) |
-				pgsp_riscv_irqdest_ext_f(0xff);
-			/* 0=disable, 1=enable */
-			intr_mask = pgsp_riscv_irqmset_gptmr_f(1) |
-				pgsp_riscv_irqmset_wdtmr_f(1) |
-				pgsp_riscv_irqmset_mthd_f(0) |
-				pgsp_riscv_irqmset_ctxsw_f(0) |
-				pgsp_riscv_irqmset_halt_f(1) |
-				pgsp_riscv_irqmset_exterr_f(1) |
-				pgsp_riscv_irqmset_swgen0_f(1) |
-				pgsp_riscv_irqmset_swgen1_f(1);
-			/* set required irq */
-			ga10b_riscv_set_irq(g, true, intr_mask, intr_dest);
-		}
 		nvgpu_cic_mon_intr_stall_unit_config(g,
 			NVGPU_CIC_INTR_UNIT_GSP, NVGPU_CIC_INTR_ENABLE);
+		/* Configuring RISCV interrupts is expected to be done inside firmware */
 	}
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -47,7 +47,7 @@ int ga10b_gsp_flcn_copy_from_emem(struct gk20a *g,
 /* interrupt */
 void ga10b_gsp_enable_irq(struct gk20a *g, bool enable);
-void ga10b_gsp_isr(struct gk20a *g);
+void ga10b_gsp_isr(struct gk20a *g, struct nvgpu_gsp *gsp);
 void ga10b_gsp_set_msg_intr(struct gk20a *g);
 #endif /* CONFIG_NVGPU_GSP_SCHEDULER */
 #endif /* GSP_GA10B_H */
@@ -30,6 +30,9 @@
 #include <nvgpu/cic_mon.h>
 #include <nvgpu/power_features/pg.h>
 #include <nvgpu/gr/gr_instances.h>
+#ifdef CONFIG_NVGPU_GSP_SCHEDULER
+#include <nvgpu/gsp.h>
+#endif
 #include "mc_intr_ga10b.h"
@@ -703,7 +706,7 @@ static void ga10b_intr_isr_stall_host2soc_2(struct gk20a *g)
 			&unit_subtree_mask) == true) {
 		handled_subtree_mask |= unit_subtree_mask;
 		ga10b_intr_subtree_clear(g, subtree, unit_subtree_mask);
-		g->ops.gsp.gsp_isr(g);
+		nvgpu_gsp_isr(g);
 	}
 #endif /* CONFIG_NVGPU_GSP_SCHEDULER */
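The stall-interrupt top half now calls the chip-independent nvgpu_gsp_isr() instead of the HAL directly, so common code can route the interrupt to whichever unit currently owns the RISCV core. The routing itself is not shown in this excerpt; one sketch consistent with the APIs this change declares, assuming a loaded stress test takes precedence:

	void nvgpu_gsp_isr(struct gk20a *g)
	{
	#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
		/* Assumed priority: the stress test owns the core while loaded. */
		if (nvgpu_gsp_is_stress_test(g)) {
			nvgpu_gsp_stest_isr(g);
			return;
		}
	#endif
		nvgpu_gsp_sched_isr(g);
	}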
@@ -110,7 +110,10 @@ struct clk_domains_mon_status_params;
 struct nvgpu_cic_mon;
 struct nvgpu_cic_rm;
 #ifdef CONFIG_NVGPU_GSP_SCHEDULER
-struct nvgpu_gsp;
+struct nvgpu_gsp_sched;
+#endif
+#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
+struct nvgpu_gsp_test;
 #endif
 enum nvgpu_flush_op;
@@ -496,7 +499,10 @@ struct gk20a {
 	struct nvgpu_acr *acr;
 #ifdef CONFIG_NVGPU_GSP_SCHEDULER
 	/** Pointer to struct maintaining GSP unit's software state. */
-	struct nvgpu_gsp *gsp;
+	struct nvgpu_gsp_sched *gsp_sched;
+#endif
+#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
+	struct nvgpu_gsp_test *gsp_stest;
 #endif
 	/** Top level struct maintaining ECC unit's software state. */
 	struct nvgpu_ecc ecc;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -27,6 +27,8 @@
 struct gk20a;
 struct nvgpu_gsp;
+#define GSP_WAIT_TIME_MS 10000U
 struct gops_gsp {
 	u32 (*falcon_base_addr)(void);
 	u32 (*falcon2_base_addr)(void);
@@ -50,7 +52,7 @@ struct gops_gsp {
 	void (*msgq_tail)(struct gk20a *g, struct nvgpu_gsp *gsp,
 		u32 *tail, bool set);
 	void (*enable_irq)(struct gk20a *g, bool enable);
-	void (*gsp_isr)(struct gk20a *g);
+	void (*gsp_isr)(struct gk20a *g, struct nvgpu_gsp *gsp);
 	void (*set_msg_intr)(struct gk20a *g);
 #endif /* CONFIG_NVGPU_GSP_SCHEDULER */
 };
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -22,23 +22,45 @@
 #ifndef NVGPU_GSP
 #define NVGPU_GSP
+#include <nvgpu/lock.h>
+#include <nvgpu/nvgpu_mem.h>
 struct gk20a;
-int nvgpu_gsp_sw_init(struct gk20a *g);
-int nvgpu_gsp_bootstrap(struct gk20a *g);
-void nvgpu_gsp_suspend(struct gk20a *g);
-void nvgpu_gsp_sw_deinit(struct gk20a *g);
-void nvgpu_gsp_isr_support(struct gk20a *g, bool enable);
-void nvgpu_gsp_isr_mutex_aquire(struct gk20a *g);
-void nvgpu_gsp_isr_mutex_release(struct gk20a *g);
-bool nvgpu_gsp_is_isr_enable(struct gk20a *g);
+struct nvgpu_gsp;
+struct gsp_fw {
+	/* gsp ucode name */
+	char *code_name;
+	char *data_name;
+	char *manifest_name;
+	/* gsp ucode */
+	struct nvgpu_firmware *code;
+	struct nvgpu_firmware *data;
+	struct nvgpu_firmware *manifest;
+};
+/* GSP descriptor's */
+struct nvgpu_gsp {
+	struct gk20a *g;
+	struct gsp_fw gsp_ucode;
+	struct nvgpu_falcon *gsp_flcn;
+	bool isr_enabled;
+	struct nvgpu_mutex isr_mutex;
+};
+int nvgpu_gsp_debug_buf_init(struct gk20a *g, u32 queue_no, u32 buffer_size);
+void nvgpu_gsp_suspend(struct gk20a *g, struct nvgpu_gsp *gsp);
+void nvgpu_gsp_sw_deinit(struct gk20a *g, struct nvgpu_gsp *gsp);
+void nvgpu_gsp_isr_mutex_acquire(struct gk20a *g, struct nvgpu_gsp *gsp);
+void nvgpu_gsp_isr_mutex_release(struct gk20a *g, struct nvgpu_gsp *gsp);
+bool nvgpu_gsp_is_isr_enable(struct gk20a *g, struct nvgpu_gsp *gsp);
 u32 nvgpu_gsp_get_last_cmd_id(struct gk20a *g);
 struct nvgpu_falcon *nvgpu_gsp_falcon_instance(struct gk20a *g);
 int nvgpu_gsp_process_message(struct gk20a *g);
-#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
-int nvgpu_gsp_stress_test_bootstrap(struct gk20a *g, bool start);
-int nvgpu_gsp_stress_test_halt(struct gk20a *g, bool restart);
-bool nvgpu_gsp_is_stress_test(struct gk20a *g);
-#endif
+int nvgpu_gsp_wait_for_mailbox_update(struct nvgpu_gsp *gsp,
+	u32 mailbox_index, u32 exp_value, signed int timeoutms);
+int nvgpu_gsp_bootstrap_ns(struct gk20a *g, struct nvgpu_gsp *gsp);
+void nvgpu_gsp_isr(struct gk20a *g);
+void nvgpu_gsp_isr_support(struct gk20a *g, struct nvgpu_gsp *gsp, bool enable);
 #endif /* NVGPU_GSP */
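The new nvgpu_gsp_wait_for_mailbox_update() export gives callers a polled handshake with the ucode. An illustrative caller, combining it with the mailbox PASS value and timeout constant introduced elsewhere in this change; the mailbox index is an assumption:

	/* Poll mailbox 0 until the ucode reports PASS or the wait times out. */
	int err = nvgpu_gsp_wait_for_mailbox_update(gsp, 0U,
			GSP_STRESS_TEST_MAILBOX_PASS,
			(signed int)GSP_WAIT_TIME_MS);
	if (err != 0) {
		nvgpu_err(g, "GSP ucode did not report PASS in mailbox 0");
	}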
@@ -2,7 +2,7 @@
 /*
  * GSP Tests
  *
- * Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2021-2022, NVIDIA Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -30,4 +30,11 @@ bool nvgpu_gsp_get_stress_test_start(struct gk20a *g);
 int nvgpu_gsp_set_stress_test_start(struct gk20a *g, bool flag);
 bool nvgpu_gsp_get_stress_test_load(struct gk20a *g);
 int nvgpu_gsp_set_stress_test_load(struct gk20a *g, bool flag);
+int nvgpu_gsp_stress_test_bootstrap(struct gk20a *g, bool start);
+int nvgpu_gsp_stress_test_halt(struct gk20a *g, bool restart);
+bool nvgpu_gsp_is_stress_test(struct gk20a *g);
+int nvgpu_gsp_stress_test_sw_init(struct gk20a *g);
+void nvgpu_gsp_test_sw_deinit(struct gk20a *g);
+void nvgpu_gsp_write_test_sysmem_addr(struct gk20a *g);
+void nvgpu_gsp_stest_isr(struct gk20a *g);
 #endif /* NVGPU_GSP_TEST */
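Read together, the newly exported entry points suggest a control flow along these lines. The ordering is inferred from the declarations and the poweroff hunk earlier on this page, and the wrapper function is hypothetical:

	static int gsp_stest_run_once(struct gk20a *g)
	{
		int err;

		/* Boot the stress-test ucode onto the GSP core. */
		err = nvgpu_gsp_stress_test_bootstrap(g, true);
		if (err != 0)
			return err;

		/* Tell the ucode where its sysmem test block lives. */
		nvgpu_gsp_write_test_sysmem_addr(g);

		/* ...test runs; progress arrives via nvgpu_gsp_stest_isr()... */

		/* Halt the test ucode; the restart flag matches the
		 * nvgpu_prepare_poweroff() call site. */
		return nvgpu_gsp_stress_test_halt(g, true);
	}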
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -20,12 +20,14 @@
  * DEALINGS IN THE SOFTWARE.
  */
-#ifndef NVGPU_GSP_BOOTSTRAP
-#define NVGPU_GSP_BOOTSTRAP
-struct nvgpu_gsp;
-#define GSP_UCODE_SIZE_MAX (256U * 1024U)
-int gsp_bootstrap_ns(struct gk20a *g, struct nvgpu_gsp *gsp);
-#endif /* NVGPU_GSP_BOOTSTRAP */
+#ifndef GSP_SCHED_H
+#define GSP_SCHED_H
+struct gk20a;
+struct nvgpu_gsp_sched;
+int nvgpu_gsp_sched_bootstrap_ns(struct gk20a *g);
+int nvgpu_gsp_sched_sw_init(struct gk20a *g);
+void nvgpu_gsp_sched_sw_deinit(struct gk20a *g);
+void nvgpu_gsp_sched_suspend(struct gk20a *g, struct nvgpu_gsp_sched *gsp_sched);
+void nvgpu_gsp_sched_isr(struct gk20a *g);
+#endif /* GSP_SCHED_H */
@@ -93,6 +93,10 @@
 #ifdef CONFIG_NVGPU_GSP_SCHEDULER
 #include "nvgpu/gsp.h"
+#include "nvgpu/gsp_sched.h"
+#endif
+#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
+#include "nvgpu/gsp/gsp_test.h"
 #endif
 #ifdef CONFIG_NVGPU_SUPPORT_CDE
@@ -1063,8 +1067,13 @@ void gk20a_remove_support(struct gk20a *g)
 	nvgpu_free_cyclestats_snapshot_data(g);
 #endif
+#ifndef CONFIG_NVGPU_DGPU
 #ifdef CONFIG_NVGPU_GSP_SCHEDULER
-	nvgpu_gsp_sw_deinit(g);
+	nvgpu_gsp_sched_sw_deinit(g);
+#endif
+#ifdef CONFIG_NVGPU_GSP_STRESS_TEST
+	nvgpu_gsp_test_sw_deinit(g);
+#endif
 #endif
 	nvgpu_fbp_remove_support(g);