From 085f94bf8904c5cde485f5bb2b52dbda0fd5213c Mon Sep 17 00:00:00 2001
From: Ramesh Mylavarapu
Date: Tue, 7 Sep 2021 21:00:03 +0530
Subject: [PATCH] gpu: nvgpu: add queue support for gsp cmd/msg

Implemented queue support, which is needed to manage the CMDQ/MSGQ
used for cmd/msg exchange with GSP. The ga10b GSP supports a total
of 4 CMDQs and 4 MSGQs; the current implementation uses only one
CMDQ and one MSGQ.

NVGPU-6784

Signed-off-by: Ramesh Mylavarapu
Change-Id: Ib40ff9df6580e15824131dd6f54bfb85dce8e594
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2590678
Tested-by: mobile promotions
Reviewed-by: mobile promotions
---
 arch/nvgpu-common.yaml                       |   2 +
 drivers/gpu/nvgpu/Makefile                   |   3 +-
 drivers/gpu/nvgpu/Makefile.sources           |   3 +-
 drivers/gpu/nvgpu/common/gsp/gsp_init.c      |   3 +
 drivers/gpu/nvgpu/common/gsp/gsp_priv.h      |   4 +
 drivers/gpu/nvgpu/common/gsp/ipc/gsp_queue.c | 191 +++++++++++++++++++
 drivers/gpu/nvgpu/common/gsp/ipc/gsp_queue.h |  54 ++++++
 7 files changed, 258 insertions(+), 2 deletions(-)
 create mode 100644 drivers/gpu/nvgpu/common/gsp/ipc/gsp_queue.c
 create mode 100644 drivers/gpu/nvgpu/common/gsp/ipc/gsp_queue.h

diff --git a/arch/nvgpu-common.yaml b/arch/nvgpu-common.yaml
index 7ca2b730b..20c7e2171 100644
--- a/arch/nvgpu-common.yaml
+++ b/arch/nvgpu-common.yaml
@@ -314,6 +314,8 @@ gsp:
             include/nvgpu/gsp/gsp_test.h,
             common/gsp/ipc/gsp_seq.c,
             common/gsp/ipc/gsp_seq.h,
+            common/gsp/ipc/gsp_queue.c,
+            common/gsp/ipc/gsp_queue.h,
             include/nvgpu/gsp.h ]
 
 engine_queues:
diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile
index 0dffbe160..55ecb92de 100644
--- a/drivers/gpu/nvgpu/Makefile
+++ b/drivers/gpu/nvgpu/Makefile
@@ -411,7 +411,8 @@ ifeq ($(CONFIG_NVGPU_GSP_SCHEDULER),y)
 nvgpu-$(CONFIG_NVGPU_GSP_SCHEDULER) += \
 	common/gsp/gsp_init.o \
 	common/gsp/gsp_bootstrap.o \
-	common/gsp/ipc/gsp_seq.o
+	common/gsp/ipc/gsp_seq.o \
+	common/gsp/ipc/gsp_queue.o
 endif
 
 ifeq ($(CONFIG_NVGPU_GSP_STRESS_TEST),y)
diff --git a/drivers/gpu/nvgpu/Makefile.sources b/drivers/gpu/nvgpu/Makefile.sources
index 2c4902f52..62aae46af 100644
--- a/drivers/gpu/nvgpu/Makefile.sources
+++ b/drivers/gpu/nvgpu/Makefile.sources
@@ -181,7 +181,8 @@ srcs += common/device.c \
 ifeq ($(CONFIG_NVGPU_GSP_SCHEDULER),1)
 srcs += common/gsp/gsp_init.c \
 	common/gsp/gsp_bootstrap.c \
-	common/gsp/ipc/gsp_seq.c
+	common/gsp/ipc/gsp_seq.c \
+	common/gsp/ipc/gsp_queue.c
 endif
 
 # Source files below are functionaly safe (FuSa) and must always be included.
diff --git a/drivers/gpu/nvgpu/common/gsp/gsp_init.c b/drivers/gpu/nvgpu/common/gsp/gsp_init.c
index d05fa4e31..eef2395e6 100644
--- a/drivers/gpu/nvgpu/common/gsp/gsp_init.c
+++ b/drivers/gpu/nvgpu/common/gsp/gsp_init.c
@@ -30,6 +30,7 @@
 #endif
 
 #include "ipc/gsp_seq.h"
+#include "ipc/gsp_queue.h"
 #include "gsp_priv.h"
 #include "gsp_bootstrap.h"
 
@@ -61,6 +62,8 @@ void nvgpu_gsp_sw_deinit(struct gk20a *g)
 #endif
 	nvgpu_gsp_sequences_free(g, g->gsp->sequences);
 
+	nvgpu_gsp_queues_free(g, g->gsp->queues);
+
 	nvgpu_kfree(g, g->gsp);
 	g->gsp = NULL;
 }
diff --git a/drivers/gpu/nvgpu/common/gsp/gsp_priv.h b/drivers/gpu/nvgpu/common/gsp/gsp_priv.h
index 314116ce6..60030e5b8 100644
--- a/drivers/gpu/nvgpu/common/gsp/gsp_priv.h
+++ b/drivers/gpu/nvgpu/common/gsp/gsp_priv.h
@@ -29,6 +29,8 @@
 #define GSP_DEBUG_BUFFER_QUEUE 3U
 #define GSP_DMESG_BUFFER_SIZE 0xC00U
 
+#define GSP_QUEUE_NUM 2U
+
 struct gsp_fw {
 	/* gsp ucode */
 	struct nvgpu_firmware *code;
@@ -58,6 +60,8 @@ struct nvgpu_gsp {
 
 	struct gsp_sequences *sequences;
 
+	struct nvgpu_engine_mem_queue *queues[GSP_QUEUE_NUM];
+
 #ifdef CONFIG_NVGPU_GSP_STRESS_TEST
 	struct gsp_stress_test gsp_test;
 #endif
diff --git a/drivers/gpu/nvgpu/common/gsp/ipc/gsp_queue.c b/drivers/gpu/nvgpu/common/gsp/ipc/gsp_queue.c
new file mode 100644
index 000000000..e08b27402
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/gsp/ipc/gsp_queue.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <nvgpu/gk20a.h>
+#include <nvgpu/log.h>
+#include <nvgpu/falcon.h>
+#include <nvgpu/engine_queue.h>
+#include <nvgpu/engine_mem_queue.h>
+
+#include "../gsp_priv.h"
+#include "gsp_queue.h"
+#include "gsp_msg.h"
+
+/* gsp falcon queue init */
+static int gsp_queue_init(struct gk20a *g,
+	struct nvgpu_engine_mem_queue **queues, u32 id,
+	struct gsp_init_msg_gsp_init *init)
+{
+	struct nvgpu_engine_mem_queue_params params = {0};
+	u32 queue_log_id = 0;
+	u32 oflag = 0;
+	int err = 0;
+
+	if (id == GSP_NV_CMDQ_LOG_ID) {
+		/*
+		 * set OFLAG_WRITE for command queue
+		 * i.e., push from nvgpu &
+		 * pop from falcon ucode
+		 */
+		oflag = OFLAG_WRITE;
+	} else if (id == GSP_NV_MSGQ_LOG_ID) {
+		/*
+		 * set OFLAG_READ for message queue
+		 * i.e., push from falcon ucode &
+		 * pop from nvgpu
+		 */
+		oflag = OFLAG_READ;
+	} else {
+		nvgpu_err(g, "invalid queue-id %d", id);
+		err = -EINVAL;
+		goto exit;
+	}
+
+	/* init queue parameters */
+	queue_log_id = init->q_info[id].queue_log_id;
+
+	params.g = g;
+	params.flcn_id = FALCON_ID_GSPLITE;
+	params.id = queue_log_id;
+	params.index = init->q_info[id].queue_phy_id;
+	params.offset = init->q_info[id].queue_offset;
+	params.position = init->q_info[id].queue_offset;
+	params.size = init->q_info[id].queue_size;
+	params.oflag = oflag;
+	params.queue_head = g->ops.gsp.gsp_queue_head;
+	params.queue_tail = g->ops.gsp.gsp_queue_tail;
+	params.queue_type = QUEUE_TYPE_EMEM;
+
+	err = nvgpu_engine_mem_queue_init(&queues[queue_log_id],
+		params);
+	if (err != 0) {
+		nvgpu_err(g, "queue-%d init failed", queue_log_id);
+	}
+
+exit:
+	return err;
+}
+
+static void gsp_queue_free(struct gk20a *g,
+	struct nvgpu_engine_mem_queue **queues, u32 id)
+{
+	if ((id != GSP_NV_CMDQ_LOG_ID) && (id != GSP_NV_MSGQ_LOG_ID)) {
+		nvgpu_err(g, "invalid queue-id %d", id);
+		return;
+	}
+
+	if (queues[id] == NULL) {
+		return;
+	}
+
+	nvgpu_engine_mem_queue_free(&queues[id]);
+}
+
+int nvgpu_gsp_queues_init(struct gk20a *g,
+	struct nvgpu_engine_mem_queue **queues,
+	struct gsp_init_msg_gsp_init *init)
+{
+	u32 i, j;
+	int err;
+
+	for (i = 0; i < GSP_QUEUE_NUM; i++) {
+		err = gsp_queue_init(g, queues, i, init);
+		if (err != 0) {
+			for (j = 0; j < i; j++) {
+				gsp_queue_free(g, queues, j);
+			}
+			nvgpu_err(g, "GSP queue init failed");
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+void nvgpu_gsp_queues_free(struct gk20a *g,
+	struct nvgpu_engine_mem_queue **queues)
+{
+	u32 i;
+
+	for (i = 0; i < GSP_QUEUE_NUM; i++) {
+		gsp_queue_free(g, queues, i);
+	}
+}
+
+u32 nvgpu_gsp_queue_get_size(struct nvgpu_engine_mem_queue **queues,
+	u32 queue_id)
+{
+	return nvgpu_engine_mem_queue_get_size(queues[queue_id]);
+}
+
+int nvgpu_gsp_queue_push(struct nvgpu_engine_mem_queue **queues,
+	u32 queue_id, struct nvgpu_falcon *flcn,
+	struct nv_flcn_cmd_gsp *cmd, u32 size)
+{
+	struct nvgpu_engine_mem_queue *queue;
+
+	queue = queues[queue_id];
+	return nvgpu_engine_mem_queue_push(flcn, queue, cmd, size);
+}
+
+bool nvgpu_gsp_queue_is_empty(struct nvgpu_engine_mem_queue **queues,
+	u32 queue_id)
+{
+	struct nvgpu_engine_mem_queue *queue = queues[queue_id];
+
+	return nvgpu_engine_mem_queue_is_empty(queue);
+}
+
+bool nvgpu_gsp_queue_read(struct gk20a *g,
+	struct nvgpu_engine_mem_queue **queues,
+	u32 queue_id, struct nvgpu_falcon *flcn, void *data,
+	u32 bytes_to_read, int *status)
+{
+	struct nvgpu_engine_mem_queue *queue = queues[queue_id];
+	u32 bytes_read;
+	int err;
+
+	err = nvgpu_engine_mem_queue_pop(flcn, queue, data,
+		bytes_to_read, &bytes_read);
+	if (err != 0) {
+		nvgpu_err(g, "fail to read msg: err %d", err);
+		*status = err;
+		return false;
+	}
+	if (bytes_read != bytes_to_read) {
+		nvgpu_err(g, "fail to read requested bytes: 0x%x != 0x%x",
+			bytes_to_read, bytes_read);
+		*status = -EINVAL;
+		return false;
+	}
+
+	return true;
+}
+
+int nvgpu_gsp_queue_rewind(struct nvgpu_falcon *flcn,
+	struct nvgpu_engine_mem_queue **queues,
+	u32 queue_id)
+{
+	struct nvgpu_engine_mem_queue *queue = queues[queue_id];
+
+	return nvgpu_engine_mem_queue_rewind(flcn, queue);
+}
diff --git a/drivers/gpu/nvgpu/common/gsp/ipc/gsp_queue.h b/drivers/gpu/nvgpu/common/gsp/ipc/gsp_queue.h
new file mode 100644
index 000000000..e11b1282c
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/gsp/ipc/gsp_queue.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVGPU_GSP_QUEUE_H
+#define NVGPU_GSP_QUEUE_H
+
+#include <nvgpu/types.h>
+
+struct gk20a;
+struct nvgpu_falcon;
+struct nv_flcn_cmd_gsp;
+struct nvgpu_engine_mem_queue;
+struct gsp_init_msg_gsp_init;
+
+int nvgpu_gsp_queues_init(struct gk20a *g,
+	struct nvgpu_engine_mem_queue **queues,
+	struct gsp_init_msg_gsp_init *init);
+void nvgpu_gsp_queues_free(struct gk20a *g,
+	struct nvgpu_engine_mem_queue **queues);
+u32 nvgpu_gsp_queue_get_size(struct nvgpu_engine_mem_queue **queues,
+	u32 queue_id);
+int nvgpu_gsp_queue_push(struct nvgpu_engine_mem_queue **queues,
+	u32 queue_id, struct nvgpu_falcon *flcn,
+	struct nv_flcn_cmd_gsp *cmd, u32 size);
+bool nvgpu_gsp_queue_is_empty(struct nvgpu_engine_mem_queue **queues,
+	u32 queue_id);
+bool nvgpu_gsp_queue_read(struct gk20a *g,
+	struct nvgpu_engine_mem_queue **queues,
+	u32 queue_id, struct nvgpu_falcon *flcn, void *data,
+	u32 bytes_to_read, int *status);
+int nvgpu_gsp_queue_rewind(struct nvgpu_falcon *flcn,
+	struct nvgpu_engine_mem_queue **queues,
+	u32 queue_id);
+
+#endif /* NVGPU_GSP_QUEUE_H */
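
For reference, the sketch below shows one way a caller (for example, a
future gsp_msg layer) might drive the new helpers: bring up the queues
from the GSP INIT message, then push a command on the CMDQ. It is an
illustrative sketch only, not part of this patch; gsp_example_send_cmd()
and the gsp->gsp_flcn member are assumptions about code outside this
change.

/*
 * Illustrative sketch only -- not part of this patch. Include paths
 * assume the caller sits in common/gsp/ (like gsp_init.c); the
 * gsp->gsp_flcn member is an assumed field of struct nvgpu_gsp.
 */
#include <nvgpu/gk20a.h>
#include <nvgpu/log.h>

#include "gsp_priv.h"
#include "ipc/gsp_queue.h"
#include "ipc/gsp_msg.h"

static int gsp_example_send_cmd(struct gk20a *g,
	struct gsp_init_msg_gsp_init *init,
	struct nv_flcn_cmd_gsp *cmd, u32 size)
{
	struct nvgpu_gsp *gsp = g->gsp;
	int err;

	/* Create one CMDQ and one MSGQ from the GSP INIT message payload. */
	err = nvgpu_gsp_queues_init(g, gsp->queues, init);
	if (err != 0) {
		return err;
	}

	/* A command larger than the whole CMDQ can never be queued. */
	if (size > nvgpu_gsp_queue_get_size(gsp->queues, GSP_NV_CMDQ_LOG_ID)) {
		return -EINVAL;
	}

	/* Push the command; the GSP ucode pops it from the other end. */
	err = nvgpu_gsp_queue_push(gsp->queues, GSP_NV_CMDQ_LOG_ID,
		gsp->gsp_flcn, cmd, size);
	if (err != 0) {
		nvgpu_err(g, "GSP cmd push failed: %d", err);
	}

	return err;
}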