diff --git a/arch/nvgpu-common.yaml b/arch/nvgpu-common.yaml
index 165248a5b..213837602 100644
--- a/arch/nvgpu-common.yaml
+++ b/arch/nvgpu-common.yaml
@@ -215,6 +215,8 @@ nvsched:
   owner: Alex W
   sources: [ common/nvs/nvs_sched.c,
              common/nvs/nvs_sched_ctrl.c,
+             common/nvs/nvs-control-interface-parser.c,
+             include/nvgpu/nvs-control-interface-parser.h,
              include/external-nvs/impl.h,
              include/external-nvs/types.h,
              include/nvgpu/nvs.h ]
diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile
index caa6c3f8d..7ccaa54ce 100644
--- a/drivers/gpu/nvgpu/Makefile
+++ b/drivers/gpu/nvgpu/Makefile
@@ -1002,6 +1002,11 @@ nvgpu-y += \
 	os/linux/ioctl_nvs.o \
 	common/nvs/nvs_sched.o \
 	common/nvs/nvs_sched_ctrl.o
+
+ifeq ($(CONFIG_NVS_KMD_BACKEND), y)
+nvgpu-y += common/nvs/nvs-control-interface-parser.o
+endif
+
 ccflags-y += \
 	$(patsubst %,-I$(srctree.nvgpu)/nvsched/%,$(NVS_INCLUDE)) \
 	-I$(srctree.nvgpu)/drivers/gpu/nvgpu/include/external-nvs \
diff --git a/drivers/gpu/nvgpu/Makefile.sources b/drivers/gpu/nvgpu/Makefile.sources
index eb7a8ce40..beb1a4a16 100644
--- a/drivers/gpu/nvgpu/Makefile.sources
+++ b/drivers/gpu/nvgpu/Makefile.sources
@@ -192,6 +192,9 @@ endif
 ifeq ($(CONFIG_NVS_PRESENT),1)
 srcs += common/nvs/nvs_sched_ctrl.c \
 	common/nvs/nvs_sched.c
+ifeq ($(CONFIG_NVS_KMD_BACKEND),1)
+srcs += common/nvs/nvs-control-interface-parser.c
+endif
 endif
 
 ifeq ($(CONFIG_NVGPU_GSP_SCHEDULER),1)
diff --git a/drivers/gpu/nvgpu/common/nvs/nvs-control-interface-parser.c b/drivers/gpu/nvgpu/common/nvs/nvs-control-interface-parser.c
new file mode 100644
index 000000000..49949cdf6
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/nvs/nvs-control-interface-parser.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+static const u32 min_queue_size = 2U;
+
+static u32 nvs_control_atomic_read(void *const address)
+{
+	u32 value;
+
+	value = NV_READ_ONCE(*(u32 *)address);
+	nvgpu_rmb();
+
+	return value;
+}
+
+static void nvs_control_atomic_write(void *address, u32 value)
+{
+	NV_WRITE_ONCE(*(u32 *)address, value);
+	nvgpu_wmb();
+}
+
+static u64 nvs_control_atomic64_read(void *const address)
+{
+	u64 value;
+
+	value = NV_READ_ONCE(*(u64 *)address);
+	nvgpu_rmb();
+
+	return value;
+}
+
+static void nvs_control_atomic64_write(void *address, u64 value)
+{
+	NV_WRITE_ONCE(*(u64 *)address, value);
+	nvgpu_wmb();
+}
+
+void nvs_control_fifo_sender_write_message(struct nvs_control_fifo_sender *const sender,
+		const u32 msg_number, const u32 msg_sequence_tag,
+		const u64 msg_timestamp_ns)
+{
+	u64 updated_put_revolutions;
+
+	struct nvs_domain_message * const write_loc = &sender->fifo[sender->put_index];
+
+	nvgpu_mb();
+
+	write_loc->type = msg_number;
+	write_loc->sequence_tag = msg_sequence_tag;
+	write_loc->timestamp_ns = msg_timestamp_ns;
+	nvgpu_memcpy((u8 *)&write_loc->payload, (u8 *)sender->internal_buffer,
+		NVS_DOMAIN_MESSAGE_MAX_PAYLOAD_SIZE);
+
+	nvgpu_wmb();
+
+	sender->put_index++;
+	if (sender->put_index == sender->num_queue_entries) {
+		sender->put_index = 0;
+		sender->num_revolutions = nvgpu_wrapping_add_u32(sender->num_revolutions, 1U);
+	}
+
+	updated_put_revolutions = hi32_lo32_to_u64(sender->num_revolutions,
+		sender->put_index);
+
+	nvs_control_atomic64_write(&sender->control_interface->put_revolutions,
+		updated_put_revolutions);
+}
+
+void nvs_control_fifo_sender_out_of_space(struct nvs_control_fifo_sender *const sender)
+{
+	sender->num_dropped_messages++;
+	nvs_control_atomic64_write(&sender->control_interface->num_dropped_messages,
+		sender->num_dropped_messages);
+}
+
+int nvs_control_fifo_sender_can_write(struct nvs_control_fifo_sender *const sender)
+{
+	u64 curr_put_revolution;
+	u64 occupied_slots;
+	u64 available_slots;
+	u64 curr_get_index = nvs_control_atomic_read(&sender->control_interface->get);
+
+	/* bound check the get index */
+	if (curr_get_index >= sender->num_queue_entries) {
+		nvgpu_err(sender->g, "Get is out of bounds");
+		return -EINVAL;
+	} else if (curr_get_index == NVS_DOMAIN_MSG_FIFO_CONTROL_GET_FLOW_CTRL_DISABLED) {
+		/* space is always available */
+		return 0;
+	}
+
+	/* bound check the put index */
+	if (sender->put_index >= sender->num_queue_entries) {
+		nvgpu_err(sender->g, "Put is out of bounds. Probable memory corruption");
+		return -EBADF;
+	}
+
+	curr_put_revolution = nvs_control_atomic64_read(
+		&sender->control_interface->put_revolutions);
+	if (curr_put_revolution != hi32_lo32_to_u64(sender->num_revolutions,
+			sender->put_index)) {
+		nvgpu_err(sender->g, "Put index has changed since our last index");
+		return -EINVAL;
+	}
+
+	if (sender->put_index >= curr_get_index) {
+		occupied_slots = sender->put_index - curr_get_index;
+	} else {
+		occupied_slots = sender->num_queue_entries - curr_get_index + sender->put_index;
+	}
+
+	available_slots = sender->num_queue_entries - occupied_slots - 1U;
+	if (available_slots == 0) {
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+struct nvs_control_fifo_sender *nvs_control_fifo_sender_initialize(
+		struct gk20a *g, struct nvs_domain_msg_fifo *const ring_buffer,
+		u64 buffer_size_bytes)
+{
+	u64 current_put_revolutions;
+	struct nvs_control_fifo_sender *sender = NULL;
+	const u32 lower_bound_buffer_size =
+		nvgpu_safe_add_u32(nvgpu_safe_mult_u32(min_queue_size,
+			sizeof(struct nvs_domain_message)),
+			sizeof(struct nvs_domain_msg_fifo_control));
+
+	if (ring_buffer == NULL) {
+		nvgpu_err(g, "ring buffer is NULL");
+		return NULL;
+	}
+
+	if (buffer_size_bytes < lower_bound_buffer_size) {
+		nvgpu_err(g, "buffer size must be a minimum of 2 entries");
+		return NULL;
+	}
+
+	sender = nvgpu_kzalloc(g, sizeof(*sender));
+	if (sender == NULL) {
+		return NULL;
+	}
+
+	sender->g = g;
+
+	sender->num_queue_entries = (nvgpu_safe_sub_u64(buffer_size_bytes,
+		sizeof(struct nvs_domain_msg_fifo_control))) / sizeof(
+		struct nvs_domain_message);
+
+	sender->fifo = ring_buffer->messages;
+	sender->control_interface = &ring_buffer->control;
+
+	current_put_revolutions = nvs_control_atomic64_read(
+		&sender->control_interface->put_revolutions);
+	sender->put_index = u64_lo32(current_put_revolutions);
+	sender->num_revolutions = u64_hi32(current_put_revolutions);
+
+	sender->num_dropped_messages = nvs_control_atomic64_read(
+		&sender->control_interface->num_dropped_messages);
+
+	if (sender->put_index >= sender->num_queue_entries) {
+		nvgpu_err(g, "Put Index more than Max Queue size");
+		nvgpu_kfree(g, sender);
+		return NULL;
+	}
+
+	return sender;
+}
+
+void nvs_control_fifo_read_message(struct nvs_control_fifo_receiver *const receiver)
+{
+	struct nvs_domain_message *const read_loc = &receiver->fifo[receiver->get_index];
+
+	nvgpu_rmb();
+
+	/* Copy the message from the buffer */
+	receiver->msg_type = read_loc->type;
+	receiver->msg_sequence = read_loc->sequence_tag;
+	receiver->msg_timestamp_ns = read_loc->timestamp_ns;
+
+	memset((u8 *)&receiver->internal_buffer, 0, NVS_DOMAIN_MESSAGE_MAX_PAYLOAD_SIZE);
+
+	nvgpu_memcpy((u8 *)&receiver->internal_buffer,
+		(u8 *)&read_loc->payload, NVS_DOMAIN_MESSAGE_MAX_PAYLOAD_SIZE);
+
+	nvgpu_mb();
+
+	receiver->get_index++;
+	if (receiver->get_index == receiver->num_queue_entries) {
+		receiver->get_index = 0;
+	}
+
+	nvs_control_atomic_write(&receiver->control_interface->get, receiver->get_index);
+}
+
+int nvs_control_fifo_receiver_can_read(struct nvs_control_fifo_receiver *const receiver)
+{
+	u32 put;
+	u64 curr_put_revolution;
+
+	curr_put_revolution = nvs_control_atomic64_read(
+		&receiver->control_interface->put_revolutions);
+	put = u64_lo32(curr_put_revolution);
+
+	if (put == receiver->get_index) {
+		nvs_dbg(receiver->g, "No new message");
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+struct nvs_control_fifo_receiver *nvs_control_fifo_receiver_initialize(
+		struct gk20a *g, struct nvs_domain_msg_fifo *const fifo,
+		u64 buffer_size_bytes)
+{
+	struct nvs_control_fifo_receiver *receiver = NULL;
+	u64 num_put_revolutions;
+
+	const u32 lower_bound_buffer_size =
+		nvgpu_safe_add_u32(nvgpu_safe_mult_u32(min_queue_size,
+			sizeof(struct nvs_domain_message)),
+			sizeof(struct nvs_domain_msg_fifo_control));
+
+	if (fifo == NULL) {
+		nvgpu_err(g, "ring buffer is NULL");
+		return NULL;
+	}
+
+	if (buffer_size_bytes < lower_bound_buffer_size) {
+		nvgpu_err(g, "buffer size must be a minimum of 2 entries");
+		return NULL;
+	}
+
+	receiver = nvgpu_kzalloc(g, sizeof(*receiver));
+	if (receiver == NULL) {
+		return NULL;
+	}
+
+	receiver->g = g;
+
+	receiver->num_queue_entries = (nvgpu_safe_sub_u64(buffer_size_bytes,
+		sizeof(struct nvs_domain_msg_fifo_control))) / sizeof(
+		struct nvs_domain_message);
+
+	receiver->fifo = fifo->messages;
+	receiver->control_interface = &fifo->control;
+
+	num_put_revolutions = nvs_control_atomic64_read(
+		&receiver->control_interface->put_revolutions);
+	receiver->get_index = u64_lo32(num_put_revolutions);
+
+	if (receiver->get_index >= receiver->num_queue_entries) {
+		nvgpu_err(g, "Get Index more than Max Queue size");
+		nvgpu_kfree(g, receiver);
+		return NULL;
+	}
+
+	nvs_control_fifo_enable_flow_control(receiver->control_interface, receiver->get_index);
+
+	return receiver;
+}
+
+void nvs_control_fifo_sender_exit(struct gk20a *g,
+		struct nvs_control_fifo_sender *const sender)
+{
+	nvs_control_fifo_disable_flow_control(sender->control_interface);
+	nvgpu_kfree(g, sender);
+}
+
+void nvs_control_fifo_receiver_exit(struct gk20a *g,
+		struct nvs_control_fifo_receiver *const receiver)
+{
+	nvs_control_fifo_disable_flow_control(receiver->control_interface);
+	nvgpu_kfree(g, receiver);
+}
+
+void nvs_control_fifo_enable_flow_control(struct nvs_domain_msg_fifo_control *control_interface,
+		u32 get_index)
+{
+	nvs_control_atomic_write(&control_interface->get, get_index);
+}
+
+void nvs_control_fifo_disable_flow_control(struct nvs_domain_msg_fifo_control *control_interface)
+{
+	nvs_control_atomic_write(&control_interface->get,
+		NVS_DOMAIN_MSG_FIFO_CONTROL_GET_FLOW_CTRL_DISABLED);
+}
\ No newline at end of file
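Review note, not part of the patch: a minimal sketch of how a KMD-side producer might drive the sender API introduced above, once the receive-queue ring buffer has been mapped. Only the nvs_control_fifo_sender_* calls, the internal_buffer staging area, and NVS_DOMAIN_MESSAGE_MAX_PAYLOAD_SIZE come from this change; the function name, error handling, and payload arguments are illustrative assumptions.

```c
/* Illustrative only; would build inside nvgpu with the new header included. */
static int example_post_message(struct gk20a *g,
		struct nvs_control_fifo_sender *sender,
		u32 msg_type, u32 seq, u64 timestamp_ns,
		u8 *payload, u32 payload_size)
{
	int err;

	if (payload_size > NVS_DOMAIN_MESSAGE_MAX_PAYLOAD_SIZE) {
		nvgpu_err(g, "payload too large for a domain message");
		return -EINVAL;
	}

	/* -EAGAIN means the consumer has not freed a slot yet. */
	err = nvs_control_fifo_sender_can_write(sender);
	if (err == -EAGAIN) {
		/* Account for the drop in the shared control block and bail. */
		nvs_control_fifo_sender_out_of_space(sender);
		return err;
	} else if (err != 0) {
		return err;
	}

	/* Stage the payload locally, then publish it; write_message updates
	 * the shared put/revolutions word as its final step. */
	(void)memset(sender->internal_buffer, 0, NVS_DOMAIN_MESSAGE_MAX_PAYLOAD_SIZE);
	nvgpu_memcpy(sender->internal_buffer, payload, payload_size);
	nvs_control_fifo_sender_write_message(sender, msg_type, seq, timestamp_ns);

	return 0;
}
```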
diff --git a/drivers/gpu/nvgpu/common/nvs/nvs_sched_ctrl.c b/drivers/gpu/nvgpu/common/nvs/nvs_sched_ctrl.c
index 0a895d253..357d7b8eb 100644
--- a/drivers/gpu/nvgpu/common/nvs/nvs_sched_ctrl.c
+++ b/drivers/gpu/nvgpu/common/nvs/nvs_sched_ctrl.c
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 
 struct nvgpu_nvs_domain_ctrl_fifo_users {
@@ -48,19 +49,29 @@ struct nvgpu_nvs_domain_ctrl_fifo_queues {
 	 * and queried by the scheduler(GET).
 	 */
 	struct nvgpu_nvs_ctrl_queue send;
+
+	/*
+	 * This contains the APIs required for reading the send queue.
+	 */
+	struct nvs_control_fifo_receiver *send_queue_receiver;
+
 	/*
 	 * receive indicates a buffer having data(PUT) written by scheduler
 	 * and queried by the userspace client(GET).
 	 */
 	struct nvgpu_nvs_ctrl_queue receive;
+	/*
+	 * This contains the APIs required for writing the receive queue.
+	 */
+	struct nvs_control_fifo_sender *receiver_queue_sender;
+
 	/*
 	 * event indicates a buffer that is subscribed to by userspace clients to
 	 * receive events. This buffer is Read-Only for the users and only scheduler can
 	 * write to it.
 	 */
 	struct nvgpu_nvs_ctrl_queue event;
-
 	/*
 	 * Global mutex for coarse grained access control
 	 * of all Queues for all UMD interfaces. e.g. IOCTL/devctls
@@ -217,6 +228,72 @@ struct nvgpu_nvs_domain_ctrl_fifo *nvgpu_nvs_ctrl_fifo_create(struct gk20a *g)
 	return sched;
 }
+
+void nvgpu_nvs_domain_ctrl_fifo_set_receiver(struct gk20a *g,
+		struct nvs_control_fifo_receiver *receiver)
+{
+	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl = g->sched_ctrl_fifo;
+
+	if (sched_ctrl == NULL) {
+		return;
+	}
+
+	sched_ctrl->queues.send_queue_receiver = receiver;
+
+	nvgpu_smp_wmb();
+}
+
+void nvgpu_nvs_domain_ctrl_fifo_set_sender(struct gk20a *g,
+		struct nvs_control_fifo_sender *sender)
+{
+	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl = g->sched_ctrl_fifo;
+
+	if (sched_ctrl == NULL) {
+		return;
+	}
+
+	sched_ctrl->queues.receiver_queue_sender = sender;
+
+	nvgpu_smp_wmb();
+}
+
+struct nvs_control_fifo_receiver *nvgpu_nvs_domain_ctrl_fifo_get_receiver(struct gk20a *g)
+{
+	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl = g->sched_ctrl_fifo;
+
+	if (sched_ctrl == NULL) {
+		return NULL;
+	}
+
+	nvgpu_smp_rmb();
+
+	return sched_ctrl->queues.send_queue_receiver;
+}
+
+struct nvs_control_fifo_sender *nvgpu_nvs_domain_ctrl_fifo_get_sender(struct gk20a *g)
+{
+	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl = g->sched_ctrl_fifo;
+
+	if (sched_ctrl == NULL) {
+		return NULL;
+	}
+
+	nvgpu_smp_rmb();
+
+	return sched_ctrl->queues.receiver_queue_sender;
+}
+
+bool nvgpu_nvs_ctrl_fifo_is_enabled(struct gk20a *g)
+{
+	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl = g->sched_ctrl_fifo;
+
+	if (sched_ctrl == NULL) {
+		return false;
+	}
+
+	return nvgpu_nvs_ctrl_fifo_is_busy(sched_ctrl);
+}
+
 void nvgpu_nvs_ctrl_fifo_idle(struct gk20a *g)
 {
 	struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl = g->sched_ctrl_fifo;
@@ -327,6 +404,10 @@ int nvgpu_nvs_buffer_alloc(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
 	int err;
 	struct gk20a *g = sched_ctrl->g;
 	struct vm_gk20a *system_vm = g->mm.pmu.vm;
+#ifdef CONFIG_NVS_KMD_BACKEND
+	struct nvs_control_fifo_receiver *send_queue_receiver;
+	struct nvs_control_fifo_sender *receiver_queue_sender;
+#endif
 
 	(void)memset(buf, 0, sizeof(*buf));
 	buf->g = g;
@@ -337,6 +418,24 @@
 		goto fail;
 	}
 
+#ifdef CONFIG_NVS_KMD_BACKEND
+	if (mask == NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_WRITE) {
+		send_queue_receiver = nvs_control_fifo_receiver_initialize(g,
+			(struct nvs_domain_msg_fifo * const)buf->mem.cpu_va, bytes);
+		if (send_queue_receiver == NULL) {
+			goto fail;
+		}
+		nvgpu_nvs_domain_ctrl_fifo_set_receiver(g, send_queue_receiver);
+	} else if (mask == NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_READ) {
+		receiver_queue_sender = nvs_control_fifo_sender_initialize(g,
+			(struct nvs_domain_msg_fifo *)buf->mem.cpu_va, bytes);
+		if (receiver_queue_sender == NULL) {
+			goto fail;
+		}
+		nvgpu_nvs_domain_ctrl_fifo_set_sender(g, receiver_queue_sender);
+	}
+#endif
+
 	buf->valid = true;
 	buf->mask = mask;
@@ -353,6 +452,21 @@ void nvgpu_nvs_buffer_free(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
 {
 	struct gk20a *g = sched_ctrl->g;
 	struct vm_gk20a *system_vm = g->mm.pmu.vm;
+	u8 mask = buf->mask;
+#ifdef CONFIG_NVS_KMD_BACKEND
+	struct nvs_control_fifo_receiver * const send_queue_receiver =
+		nvgpu_nvs_domain_ctrl_fifo_get_receiver(g);
+	struct nvs_control_fifo_sender * const receiver_queue_sender =
+		nvgpu_nvs_domain_ctrl_fifo_get_sender(g);
+
+	if (mask == NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_WRITE) {
+		nvgpu_nvs_domain_ctrl_fifo_set_receiver(g, NULL);
+		nvs_control_fifo_receiver_exit(g, send_queue_receiver);
+	} else if (mask == NVGPU_NVS_CTRL_FIFO_QUEUE_EXCLUSIVE_CLIENT_READ) {
+		nvgpu_nvs_domain_ctrl_fifo_set_sender(g, NULL);
+		nvs_control_fifo_sender_exit(g, receiver_queue_sender);
+	}
+#endif
 
 	if (nvgpu_mem_is_valid(&buf->mem)) {
 		nvgpu_dma_unmap_free(system_vm, &buf->mem);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvs-control-interface-parser.h b/drivers/gpu/nvgpu/include/nvgpu/nvs-control-interface-parser.h
new file mode 100644
index 000000000..a0de8837b
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvs-control-interface-parser.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef NVS_CONTROL_INTERFACE_PARSER_H
+#define NVS_CONTROL_INTERFACE_PARSER_H
+
+#ifdef CONFIG_NVS_PRESENT
+#include
+#include
+#else
+#define NVS_DOMAIN_MESSAGE_MAX_PAYLOAD_SIZE 48
+#endif
+
+#include
+
+struct gk20a;
+struct nvs_domain_message;
+struct nvs_domain_msg_fifo_control;
+struct nvs_domain_msg_fifo;
+
+struct nvs_control_fifo_sender {
+	struct gk20a *g;
+	/* pointer to the message queues */
+	struct nvs_domain_message *fifo;
+	/* pointer to the control interface */
+	struct nvs_domain_msg_fifo_control *control_interface;
+	/* internal buffer storage */
+	u8 internal_buffer[NVS_DOMAIN_MESSAGE_MAX_PAYLOAD_SIZE];
+	/* Below fields maintain local copies */
+	u32 put_index;
+	u32 num_revolutions;
+	u64 num_queue_entries;
+	u64 num_dropped_messages;
+};
+
+struct nvs_control_fifo_receiver {
+	struct gk20a *g;
+	/* pointer to the message queues */
+	struct nvs_domain_message *fifo;
+	/* pointer to the control interface */
+	struct nvs_domain_msg_fifo_control *control_interface;
+	/* internal buffer storage */
+	u8 internal_buffer[NVS_DOMAIN_MESSAGE_MAX_PAYLOAD_SIZE];
+	/* Below fields maintain local copies */
+	u32 msg_type;
+	u32 msg_sequence;
+	u64 msg_timestamp_ns;
+
+	u32 get_index;
+	u64 num_queue_entries;
+};
+
+void nvs_control_fifo_sender_write_message(struct nvs_control_fifo_sender *const sender,
+		u32 const msg_number, u32 const msg_sequence_tag,
+		u64 const msg_timestamp_ns);
+void nvs_control_fifo_sender_out_of_space(struct nvs_control_fifo_sender *const sender);
+int nvs_control_fifo_sender_can_write(struct nvs_control_fifo_sender * const sender);
+struct nvs_control_fifo_sender *nvs_control_fifo_sender_initialize(
+		struct gk20a *g, struct nvs_domain_msg_fifo *fifo, u64 buffer_size_bytes);
+
+void nvs_control_fifo_read_message(struct nvs_control_fifo_receiver *const receiver);
+int nvs_control_fifo_receiver_can_read(struct nvs_control_fifo_receiver *const receiver);
+struct nvs_control_fifo_receiver *nvs_control_fifo_receiver_initialize(
+		struct gk20a *g, struct nvs_domain_msg_fifo *const fifo,
+		u64 buffer_size_bytes);
+
+void nvs_control_fifo_receiver_exit(struct gk20a *g,
+		struct nvs_control_fifo_receiver *const receiver);
+void nvs_control_fifo_sender_exit(struct gk20a *g,
+		struct nvs_control_fifo_sender *const sender);
+void nvs_control_fifo_enable_flow_control(struct nvs_domain_msg_fifo_control *control_interface,
+		u32 get_index);
+void nvs_control_fifo_disable_flow_control(struct nvs_domain_msg_fifo_control *control_interface);
+
+#endif
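Review note, not part of the patch: the consuming side pairs nvs_control_fifo_receiver_can_read() with nvs_control_fifo_read_message() roughly as in the sketch below. The drain-loop helper and its callback are invented for illustration; the receiver fields (msg_type, msg_sequence, msg_timestamp_ns, internal_buffer) are the ones declared in the header above.

```c
/* Illustrative only: drains whatever is currently queued in the send queue. */
static void example_drain_send_queue(struct gk20a *g,
		struct nvs_control_fifo_receiver *receiver,
		void (*handle)(struct gk20a *g, u32 type, u32 seq,
			       u64 timestamp_ns, const u8 *payload))
{
	/* -EAGAIN from can_read() simply means the queue is empty. */
	while (nvs_control_fifo_receiver_can_read(receiver) == 0) {
		/*
		 * Copies one message into the receiver's local fields and
		 * advances the shared GET index (flow control permitting).
		 */
		nvs_control_fifo_read_message(receiver);

		handle(g, receiver->msg_type, receiver->msg_sequence,
			receiver->msg_timestamp_ns, receiver->internal_buffer);
	}
}
```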
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvs.h b/drivers/gpu/nvgpu/include/nvgpu/nvs.h
index 76f1951fd..96adb9ba6 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/nvs.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvs.h
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include
 
 /*
  * Max size we'll parse from an NVS log entry.
@@ -260,6 +261,7 @@ void nvgpu_nvs_worker_pause(struct gk20a *g);
 void nvgpu_nvs_worker_resume(struct gk20a *g);
 #endif
 
+bool nvgpu_nvs_ctrl_fifo_is_enabled(struct gk20a *g);
 struct nvgpu_nvs_domain_ctrl_fifo *nvgpu_nvs_ctrl_fifo_create(struct gk20a *g);
 bool nvgpu_nvs_ctrl_fifo_user_exists(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
 		int pid, bool rw);
@@ -285,6 +287,16 @@ struct nvgpu_nvs_ctrl_queue *nvgpu_nvs_ctrl_fifo_get_queue(
 		enum nvgpu_nvs_ctrl_queue_num queue_num,
 		enum nvgpu_nvs_ctrl_queue_direction queue_direction,
 		u8 *mask);
+
+#ifdef CONFIG_NVS_KMD_BACKEND
+struct nvs_control_fifo_receiver *nvgpu_nvs_domain_ctrl_fifo_get_receiver(struct gk20a *g);
+struct nvs_control_fifo_sender *nvgpu_nvs_domain_ctrl_fifo_get_sender(struct gk20a *g);
+void nvgpu_nvs_domain_ctrl_fifo_set_receiver(struct gk20a *g,
+		struct nvs_control_fifo_receiver *receiver);
+void nvgpu_nvs_domain_ctrl_fifo_set_sender(struct gk20a *g,
+		struct nvs_control_fifo_sender *sender);
+#endif
+
 /* Below methods require nvgpu_nvs_ctrl_fifo_lock_queues() to be held. */
 bool nvgpu_nvs_buffer_is_valid(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *buf);
 int nvgpu_nvs_buffer_alloc(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
diff --git a/nvsched/include/nvs/log.h b/nvsched/include/nvs/log.h
index f730ec819..aac6e2e82 100644
--- a/nvsched/include/nvs/log.h
+++ b/nvsched/include/nvs/log.h
@@ -34,6 +34,7 @@ enum nvs_event {
 	NVS_EV_CREATE_SCHED,
 	NVS_EV_CREATE_DOMAIN,
 	NVS_EV_REMOVE_DOMAIN,
+	NVS_EV_CTRL_QUEUE,
 
 	NVS_EV_MAX = 0xffffffff /* Force to 32 bit enum size. */
 };
diff --git a/nvsched/src/logging.c b/nvsched/src/logging.c
index 3265bb021..7d5cc374b 100644
--- a/nvsched/src/logging.c
+++ b/nvsched/src/logging.c
@@ -124,6 +124,7 @@ const char *nvs_log_event_string(enum nvs_event ev)
 	case NVS_EV_CREATE_SCHED: return "Create scheduler";
 	case NVS_EV_CREATE_DOMAIN: return "Create domain";
 	case NVS_EV_REMOVE_DOMAIN: return "Remove domain";
+	case NVS_EV_CTRL_QUEUE: return "Control Queue";
 	case NVS_EV_MAX: return "Invalid MAX event";
 	}
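A closing review note on the shared control block: put_revolutions is a single 64-bit word that packs the producer's wrap-around count in the upper 32 bits and the PUT index in the lower 32 bits, so one atomic read tells a peer both where the producer is and whether it has lapped the ring since the reader last looked. A standalone sketch of that encoding follows; the helpers are local stand-ins for nvgpu's hi32_lo32_to_u64(), u64_hi32() and u64_lo32(), which the parser code above uses.

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for nvgpu's hi32_lo32_to_u64(), u64_hi32() and u64_lo32(). */
static uint64_t pack_put_revolutions(uint32_t revolutions, uint32_t put_index)
{
	return ((uint64_t)revolutions << 32) | (uint64_t)put_index;
}

static uint32_t unpack_revolutions(uint64_t put_revolutions)
{
	return (uint32_t)(put_revolutions >> 32);
}

static uint32_t unpack_put_index(uint64_t put_revolutions)
{
	return (uint32_t)(put_revolutions & 0xffffffffu);
}

int main(void)
{
	/* Producer has wrapped the ring 3 times and currently sits at slot 5. */
	uint64_t word = pack_put_revolutions(3u, 5u);

	printf("revolutions=%u put=%u\n",
	       unpack_revolutions(word), unpack_put_index(word));
	return 0;
}
```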