gpu: nvgpu: nvs: queue direction update

Changes:
- Update nvgpu_nvs_ctrl_queue to carry the queue direction, since the
  GSP scheduler needs it to erase each queue individually.
- The queue direction is recorded during the create-queue ioctl call
  and is consumed only by the GSP scheduler, so no other module is
  affected by it (a standalone sketch follows this list).
- The erase command now has to pass the size of the payload struct,
  which is a u32 parameter; downgrading the value from u64 to u32 is
  intentional, so the MISRA C Rule 10.3 violation can be ignored here.
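For readers outside the GSP scheduler code, here is a minimal standalone
sketch of the idea, using simplified stand-ins for the nvgpu types (the
real definitions live in the headers touched below); direction_to_qtype()
is a hypothetical helper and the numeric enum values are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the nvgpu enums/struct touched by this change;
 * numeric values are illustrative, not the driver's. */
enum nvgpu_nvs_ctrl_queue_direction {
	NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER,
	NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT,
};

enum queue_type {
	CONTROL_QUEUE,
	RESPONSE_QUEUE,
};

struct nvgpu_nvs_ctrl_queue {
	/* ...other members elided... */
	enum nvgpu_nvs_ctrl_queue_direction queue_direction; /* new member */
};

/* Recorded once, at create-queue ioctl time. */
static void record_direction(struct nvgpu_nvs_ctrl_queue *q,
		enum nvgpu_nvs_ctrl_queue_direction dir)
{
	q->queue_direction = dir;
}

/* At erase time the stored direction selects which GSP queue to tear down
 * (hypothetical helper mirroring the mapping in the first hunk below). */
static int direction_to_qtype(enum nvgpu_nvs_ctrl_queue_direction dir,
		uint32_t *qtype)
{
	switch (dir) {
	case NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER:
		*qtype = (uint32_t)CONTROL_QUEUE;
		return 0;
	case NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT:
		*qtype = (uint32_t)RESPONSE_QUEUE;
		return 0;
	default:
		return -1; /* unsupported direction */
	}
}

int main(void)
{
	struct nvgpu_nvs_ctrl_queue q;
	uint32_t qtype;

	record_direction(&q, NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT);
	if (direction_to_qtype(q.queue_direction, &qtype) == 0) {
		printf("erase qtype = %u\n", (unsigned int)qtype);
	}
	return 0;
}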

Bug 4027512

Change-Id: I6ef6e4b06124e25da3d004a2d8822516c3ac2105
Signed-off-by: vivekku <vivekku@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2881804
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Mahantesh Kumbar <mkumbar@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Authored by vivekku on 2023-04-03 17:09:40 +00:00; committed by mobile promotions
commit bd5ab81ccc, parent a4175b1265
5 changed files with 32 additions and 5 deletions

@@ -97,16 +97,38 @@ exit:
 	return err;
 }
 
-int nvgpu_gsp_sched_erase_ctrl_fifo(struct gk20a *g)
+int nvgpu_gsp_sched_erase_ctrl_fifo(struct gk20a *g,
+		enum nvgpu_nvs_ctrl_queue_direction queue_direction)
 {
 	int err = 0;
 	struct nv_flcn_cmd_gsp cmd = { };
+	enum queue_type qtype;
 
-	err = gsp_send_cmd_and_wait_for_ack(g, &cmd, NV_GSP_UNIT_CONTROL_FIFO_ERASE, 0);
+	/* populating command with only queue direction */
+	cmd.cmd.ctrl_fifo.fifo_addr_lo = 0U;
+	cmd.cmd.ctrl_fifo.fifo_addr_hi = 0U;
+	cmd.cmd.ctrl_fifo.queue_entries = 0U;
+	cmd.cmd.ctrl_fifo.queue_size = 0U;
+
+	if (queue_direction == NVGPU_NVS_DIR_CLIENT_TO_SCHEDULER) {
+		qtype = CONTROL_QUEUE;
+	} else if (queue_direction == NVGPU_NVS_DIR_SCHEDULER_TO_CLIENT) {
+		qtype = RESPONSE_QUEUE;
+	} else {
+		nvgpu_err(g, "Erase queue failed queue type not supported");
+		err = -EINVAL;
+		goto exit;
+	}
+
+	cmd.cmd.ctrl_fifo.qtype = (u32)qtype;
+
+	err = gsp_send_cmd_and_wait_for_ack(g, &cmd, NV_GSP_UNIT_CONTROL_FIFO_ERASE,
+			sizeof(struct nvgpu_gsp_ctrl_fifo_info));
 	if (err != 0) {
 		nvgpu_err(g, "GSP ctrl fifo erase cmd failed");
 	}
 
+exit:
 	return err;
 };
 #endif /* CONFIG_NVS_PRESENT*/
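Context for the MISRA note in the commit message: the erase command now
passes a real payload size instead of 0, and sizeof() yields a 64-bit
size_t while, per the message, the command-size parameter is u32. A small
sketch of that narrowing, using a stand-in for struct
nvgpu_gsp_ctrl_fifo_info whose field names are taken from the hunk above
(the u32 field types are an assumption):

#include <stdint.h>
#include <stdio.h>

/* Stand-in only; field names follow the hunk above, types are assumed. */
struct nvgpu_gsp_ctrl_fifo_info {
	uint32_t fifo_addr_lo;
	uint32_t fifo_addr_hi;
	uint32_t queue_entries;
	uint32_t queue_size;
	uint32_t qtype;
};

int main(void)
{
	uint32_t cmd_size;

	/* In the driver the sizeof() result (size_t, 64-bit on these builds)
	 * feeds a u32 command-size parameter; the implicit narrowing is
	 * intentional, which is why the commit message treats the MISRA C
	 * Rule 10.3 report as ignorable. */
	cmd_size = sizeof(struct nvgpu_gsp_ctrl_fifo_info);

	printf("erase payload size sent to GSP: %u bytes\n",
			(unsigned int)cmd_size);
	return 0;
}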

@@ -620,7 +620,7 @@ void nvgpu_nvs_buffer_free(struct nvgpu_nvs_domain_ctrl_fifo *sched_ctrl,
 	if (nvgpu_mem_is_valid(&buf->mem)) {
 #if defined (CONFIG_NVS_PRESENT) && defined (CONFIG_NVGPU_GSP_SCHEDULER)
 		if (nvgpu_is_enabled(g, (u32)(NVGPU_SUPPORT_GSP_SCHED))) {
-			nvgpu_gsp_sched_erase_ctrl_fifo(g);
+			nvgpu_gsp_sched_erase_ctrl_fifo(g, buf->queue_direction);
 		}
 #endif
 		nvgpu_dma_unmap_free(system_vm, &buf->mem);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -132,6 +132,7 @@ bool nvgpu_gsp_is_ready(struct gk20a *g);
 int nvgpu_gsp_sched_send_queue_info(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue,
 		enum nvgpu_nvs_ctrl_queue_direction queue_direction);
-int nvgpu_gsp_sched_erase_ctrl_fifo(struct gk20a *g);
+int nvgpu_gsp_sched_erase_ctrl_fifo(struct gk20a *g,
+		enum nvgpu_nvs_ctrl_queue_direction queue_direction);
 #endif
 #endif /* GSP_SCHED_H */

@@ -297,6 +297,7 @@ struct nvgpu_nvs_ctrl_queue {
 	bool valid;
 	u8 mask;
 	u8 ref;
+	enum nvgpu_nvs_ctrl_queue_direction queue_direction;
 	void (*free)(struct gk20a *g, struct nvgpu_nvs_ctrl_queue *queue);
 };

@@ -876,6 +876,9 @@ static int nvgpu_nvs_ctrl_fifo_create_queue(struct gk20a *g,
 		goto fail;
 	}
 
+	/* queue direction is needed by gsp scheduler */
+	queue->queue_direction = queue_direction;
+
 	read_only = (args->access_type == NVGPU_NVS_CTRL_FIFO_QUEUE_ACCESS_TYPE_EXCLUSIVE) ? false : true;
 	if (read_only) {
 		flag |= O_RDONLY;