mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
struct nvgpu_channel_sync is moved to a private header, channel_sync_priv.h, under common/sync/. All accesses to the callback functions inside struct nvgpu_channel_sync in the NVGPU driver are replaced by the public channel_sync APIs.

Jira NVGPU-1093

Change-Id: I52d57b3d458993203a3ac6b160fb569effbe5a66
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1929783
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
119 lines
3.5 KiB
C
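The change described above replaces direct use of the callback pointers stored in struct nvgpu_channel_sync with the public wrappers defined in this file. A minimal illustrative sketch of a call site follows; the channel pointer ch, the incr_cmd and post_fence variables, and the surrounding job-submit context are placeholders, not taken from this commit:

/* Before: the struct layout was public, so callers invoked the callback
 * pointer directly. */
err = ch->sync->incr(ch->sync, incr_cmd, post_fence,
		need_sync_fence, register_irq);

/* After: struct nvgpu_channel_sync lives in channel_sync_priv.h, so the
 * same operation goes through the public wrapper defined below. */
err = nvgpu_channel_sync_incr(ch->sync, incr_cmd, post_fence,
		need_sync_fence, register_irq);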
/*
 * GK20A Channel Synchronization Abstraction
 *
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/atomic.h>
#include <nvgpu/bug.h>
#include <nvgpu/list.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/os_fence.h>
#include <nvgpu/os_fence_syncpts.h>
#include <nvgpu/os_fence_semas.h>
#include <nvgpu/channel.h>
#include <nvgpu/channel_sync.h>
#include <nvgpu/channel_sync_syncpt.h>
#include <nvgpu/channel_sync_semaphore.h>

#include "channel_sync_priv.h"
#include "gk20a/fence_gk20a.h"
#include "gk20a/mm_gk20a.h"

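/*
 * Pick the sync backend for a channel: syncpoint-backed when the platform
 * has usable syncpoints, semaphore-backed otherwise.
 */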
struct nvgpu_channel_sync *nvgpu_channel_sync_create(struct channel_gk20a *c,
	bool user_managed)
{
	if (nvgpu_has_syncpoints(c->g)) {
		return nvgpu_channel_sync_syncpt_create(c, user_managed);
	} else {
		return nvgpu_channel_sync_semaphore_create(c, user_managed);
	}
}

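/*
 * The OS fence framework is only needed when syncpoints are not available.
 */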
bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g)
{
	return !nvgpu_has_syncpoints(g);
}

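/*
 * Syncpoints are usable only when nvhost support is built in, the
 * NVGPU_HAS_SYNCPOINTS flag is enabled and syncpoints have not been
 * disabled for this GPU.
 */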
bool nvgpu_has_syncpoints(struct gk20a *g)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	return nvgpu_is_enabled(g, NVGPU_HAS_SYNCPOINTS) &&
		!g->disable_syncpoints;
#else
	return false;
#endif
}

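/*
 * Dispatch to the backend's wait_fence_fd callback: wait on the sync fence
 * given by fd, emitting at most max_wait_cmds wait commands into entry.
 */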
int nvgpu_channel_sync_wait_fence_fd(struct nvgpu_channel_sync *s, int fd,
	struct priv_cmd_entry *entry, u32 max_wait_cmds)
{
	return s->wait_fence_fd(s, fd, entry, max_wait_cmds);
}

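/*
 * Dispatch to the backend's incr callback: increment the channel's sync
 * object, filling in the caller's priv cmd entry and gk20a_fence.
 */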
int nvgpu_channel_sync_incr(struct nvgpu_channel_sync *s,
	struct priv_cmd_entry *entry, struct gk20a_fence *fence,
	bool need_sync_fence, bool register_irq)
{
	return s->incr(s, entry, fence, need_sync_fence, register_irq);
}

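/*
 * Dispatch to the backend's incr_user callback: a user-facing variant of
 * nvgpu_channel_sync_incr that also takes a wait fence fd and a wfi flag.
 */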
int nvgpu_channel_sync_incr_user(struct nvgpu_channel_sync *s,
	int wait_fence_fd, struct priv_cmd_entry *entry,
	struct gk20a_fence *fence, bool wfi, bool need_sync_fence,
	bool register_irq)
{
	return s->incr_user(s, wait_fence_fd, entry, fence, wfi,
		need_sync_fence, register_irq);
}

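/*
 * Dispatch to the backend's set_min_eq_max callback, forcing the sync
 * object's minimum (current) value up to its maximum (threshold) value.
 */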
void nvgpu_channel_sync_set_min_eq_max(struct nvgpu_channel_sync *s)
{
	s->set_min_eq_max(s);
}

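/*
 * Dispatch to the backend's set_safe_state callback, putting the sync
 * object into a known safe state, as nvgpu_channel_sync_destroy() does
 * when requested.
 */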
void nvgpu_channel_sync_set_safe_state(struct nvgpu_channel_sync *s)
{
	s->set_safe_state(s);
}

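/*
 * Tear down a channel's sync object, optionally moving it to a safe state
 * first, then releasing it through the backend's destroy callback.
 */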
void nvgpu_channel_sync_destroy(struct nvgpu_channel_sync *sync,
	bool set_safe_state)
{
	if (set_safe_state) {
		sync->set_safe_state(sync);
	}
	sync->destroy(sync);
}

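/*
 * Take a reference on the sync object.
 */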
void nvgpu_channel_sync_get_ref(struct nvgpu_channel_sync *s)
{
	nvgpu_atomic_inc(&s->refcount);
}

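/*
 * Drop a reference; returns true when the refcount drops to zero.
 */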
bool nvgpu_channel_sync_put_ref_and_check(struct nvgpu_channel_sync *s)
{
	return nvgpu_atomic_dec_and_test(&s->refcount);
}