linux-nvgpu/drivers/gpu/nvgpu/common/ce2.c
Commit 312f91f991 (Seema Khowala)

gpu: nvgpu: move fence_gk20a to common/fence
Move gk20a/fence_gk20a.c to common/fence/fence.c

Renamed
gk20a_fence_from_semaphore -> nvgpu_fence_from_semaphore
gk20a_fence_from_syncpt -> nvgpu_fence_from_syncpt
gk20a_alloc_fence_pool -> nvgpu_fence_pool_alloc
gk20a_free_fence_pool -> nvgpu_fence_pool_free
gk20a_alloc_fence -> nvgpu_fence_alloc
gk20a_init_fence -> nvgpu_fence_init
gk20a_fence_put -> nvgpu_fence_put
gk20a_fence_get -> nvgpu_fence_get
gk20a_fence_wait -> nvgpu_fence_wait
gk20a_fence_is_expired -> nvgpu_fence_is_expired
gk20a_fence_install_fd -> nvgpu_fence_install_fd
gk20a_fence_ops struct -> nvgpu_fence_ops struct
gk20a_fence struct -> nvgpu_fence_type struct
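
For example, a typical call site changes as follows (illustrative
caller shown here, not part of this change):

  - gk20a_fence_put(fence);
  + nvgpu_fence_put(fence);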

JIRA NVGPU-1982

Change-Id: Ife77b2c3c386ff4368683c78ca02f00c99cddb4b
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2093002
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
2019-04-10 17:24:52 -07:00

/*
 * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <nvgpu/types.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/channel.h>
#include <nvgpu/utils.h>
#include <nvgpu/fence.h>
#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
#include "gk20a/ce2_gk20a.h"

static inline u32 gk20a_get_valid_launch_flags(struct gk20a *g, u32 launch_flags)
{
	/*
	 * There is no local memory available; don't allow local-memory-related
	 * CE flags.
	 */
	if (g->mm.vidmem.size == 0ULL) {
		launch_flags &= ~(NVGPU_CE_SRC_LOCATION_LOCAL_FB |
				NVGPU_CE_DST_LOCATION_LOCAL_FB);
	}
	return launch_flags;
}
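
/*
 * Execute a CE operation on the context identified by ce_ctx_id.
 *
 * The context owns a ring of NVGPU_CE_MAX_INFLIGHT_JOBS command buffer
 * slots. Each submit reuses the oldest slot: if that slot's post fence
 * from a previous job is still pending, it is waited on first. The CE
 * methods are then written into the slot and submitted via a gpfifo
 * entry; a post fence is always requested so the slot can be safely
 * recycled later.
 */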
int gk20a_ce_execute_ops(struct gk20a *g,
		u32 ce_ctx_id,
		u64 src_buf,
		u64 dst_buf,
		u64 size,
		unsigned int payload,
		u32 launch_flags,
		u32 request_operation,
		u32 submit_flags,
		struct nvgpu_fence_type **fence_out)
{
	int ret = -EPERM;
	struct gk20a_ce_app *ce_app = g->ce_app;
	struct gk20a_gpu_ctx *ce_ctx, *ce_ctx_save;
	bool found = false;
	u32 *cmd_buf_cpu_va;
	u64 cmd_buf_gpu_va = 0;
	u32 methodSize;
	u32 cmd_buf_read_offset;
	u32 dma_copy_class;
	struct nvgpu_gpfifo_entry gpfifo;
	struct nvgpu_channel_fence fence = {0, 0};
	struct nvgpu_fence_type *ce_cmd_buf_fence_out = NULL;

	if (!ce_app->initialised || ce_app->app_state != NVGPU_CE_ACTIVE) {
		goto end;
	}
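
	/* Look up the target CE context by id under the app mutex. */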
	nvgpu_mutex_acquire(&ce_app->app_mutex);
	nvgpu_list_for_each_entry_safe(ce_ctx, ce_ctx_save,
			&ce_app->allocated_contexts, gk20a_gpu_ctx, list) {
		if (ce_ctx->ctx_id == ce_ctx_id) {
			found = true;
			break;
		}
	}
	nvgpu_mutex_release(&ce_app->app_mutex);

	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	if (ce_ctx->gpu_ctx_state != NVGPU_CE_GPU_CTX_ALLOCATED) {
		ret = -ENODEV;
		goto end;
	}
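
	/*
	 * Pick the next slot in the circular command buffer. A previously
	 * submitted job may still own this slot; its post fence tells us
	 * when the slot is safe to overwrite.
	 */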
	nvgpu_mutex_acquire(&ce_ctx->gpu_ctx_mutex);

	ce_ctx->cmd_buf_read_queue_offset %= NVGPU_CE_MAX_INFLIGHT_JOBS;

	cmd_buf_read_offset = (ce_ctx->cmd_buf_read_queue_offset *
			(NVGPU_CE_MAX_COMMAND_BUFF_BYTES_PER_KICKOFF /
			U32(sizeof(u32))));

	cmd_buf_cpu_va = (u32 *)ce_ctx->cmd_buf_mem.cpu_va;

	if (ce_ctx->postfences[ce_ctx->cmd_buf_read_queue_offset] != NULL) {
		struct nvgpu_fence_type **prev_post_fence =
			&ce_ctx->postfences[ce_ctx->cmd_buf_read_queue_offset];

		ret = nvgpu_fence_wait(g, *prev_post_fence,
				nvgpu_get_poll_timeout(g));
		nvgpu_fence_put(*prev_post_fence);
		*prev_post_fence = NULL;
		if (ret != 0) {
			goto noop;
		}
	}
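
	/*
	 * Encode the CE method stream for this request into the current
	 * command buffer slot.
	 */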
	cmd_buf_gpu_va = (ce_ctx->cmd_buf_mem.gpu_va +
			(u64)(cmd_buf_read_offset * sizeof(u32)));

	dma_copy_class = g->ops.get_litter_value(g, GPU_LIT_DMA_COPY_CLASS);

	methodSize = gk20a_ce_prepare_submit(src_buf,
			dst_buf,
			size,
			&cmd_buf_cpu_va[cmd_buf_read_offset],
			NVGPU_CE_MAX_COMMAND_BUFF_BYTES_PER_KICKOFF,
			payload,
			gk20a_get_valid_launch_flags(g, launch_flags),
			request_operation,
			dma_copy_class);
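
	/*
	 * A method size of zero means the request could not be encoded into
	 * the slot. Otherwise, point a gpfifo entry (GPU VA plus length in
	 * words) at the freshly written command buffer and submit it.
	 */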
	if (methodSize != 0U) {
		/* store the element into gpfifo */
		gpfifo.entry0 =
			u64_lo32(cmd_buf_gpu_va);
		gpfifo.entry1 =
			(u64_hi32(cmd_buf_gpu_va) |
			pbdma_gp_entry1_length_f(methodSize));

		/*
		 * Always take the post fence, as it is needed to protect
		 * the CE context.
		 */
		submit_flags |= NVGPU_SUBMIT_FLAGS_FENCE_GET;

		nvgpu_smp_wmb();

		ret = nvgpu_submit_channel_gpfifo_kernel(ce_ctx->ch, &gpfifo,
				1, submit_flags, &fence, &ce_cmd_buf_fence_out);

		if (ret == 0) {
			ce_ctx->postfences[ce_ctx->cmd_buf_read_queue_offset] =
				ce_cmd_buf_fence_out;
			if (fence_out != NULL) {
				nvgpu_fence_get(ce_cmd_buf_fence_out);
				*fence_out = ce_cmd_buf_fence_out;
			}

			/* next available command buffer queue index */
			++ce_ctx->cmd_buf_read_queue_offset;
		}
	} else {
		ret = -ENOMEM;
	}

noop:
	nvgpu_mutex_release(&ce_ctx->gpu_ctx_mutex);
end:
	return ret;
}