/*
 * GK20A Channel Synchronization Abstraction
 *
 * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * The original include paths were lost in extraction; the list below is
 * reconstructed from the identifiers used in this file and assumes the
 * usual nvgpu header layout.
 */
#include <nvgpu/atomic.h>
#include <nvgpu/enabled.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/channel.h>
#include <nvgpu/channel_sync.h>
#include <nvgpu/channel_sync_syncpt.h>
#include <nvgpu/channel_sync_semaphore.h>

#include "channel_sync_priv.h"

/*
 * Pick the sync backend for a channel: host1x syncpoints when the
 * platform provides them, GPU semaphores otherwise.
 */
struct nvgpu_channel_sync *nvgpu_channel_sync_create(struct nvgpu_channel *c,
	bool user_managed)
{
	if (nvgpu_has_syncpoints(c->g)) {
		return nvgpu_channel_sync_syncpt_create(c, user_managed);
	} else {
		return nvgpu_channel_sync_semaphore_create(c, user_managed);
	}
}

bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g)
{
	return !nvgpu_has_syncpoints(g);
}

bool nvgpu_has_syncpoints(struct gk20a *g)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	return nvgpu_is_enabled(g, NVGPU_HAS_SYNCPOINTS) &&
		!g->disable_syncpoints;
#else
	return false;
#endif
}

/*
 * The wrappers below dispatch through the backend-specific ops filled in
 * by the syncpoint or semaphore implementation at create time.
 */

int nvgpu_channel_sync_wait_fence_fd(struct nvgpu_channel_sync *s, int fd,
	struct priv_cmd_entry *entry, u32 max_wait_cmds)
{
	return s->wait_fence_fd(s, fd, entry, max_wait_cmds);
}

int nvgpu_channel_sync_incr(struct nvgpu_channel_sync *s,
	struct priv_cmd_entry *entry, struct nvgpu_fence_type *fence,
	bool need_sync_fence, bool register_irq)
{
	return s->incr(s, entry, fence, need_sync_fence, register_irq);
}

int nvgpu_channel_sync_incr_user(struct nvgpu_channel_sync *s,
	int wait_fence_fd, struct priv_cmd_entry *entry,
	struct nvgpu_fence_type *fence, bool wfi, bool need_sync_fence,
	bool register_irq)
{
	return s->incr_user(s, wait_fence_fd, entry, fence, wfi,
			need_sync_fence, register_irq);
}

void nvgpu_channel_sync_set_min_eq_max(struct nvgpu_channel_sync *s)
{
	s->set_min_eq_max(s);
}

void nvgpu_channel_sync_set_safe_state(struct nvgpu_channel_sync *s)
{
	s->set_safe_state(s);
}

void nvgpu_channel_sync_destroy(struct nvgpu_channel_sync *sync,
	bool set_safe_state)
{
	if (set_safe_state) {
		sync->set_safe_state(sync);
	}
	sync->destroy(sync);
}

void nvgpu_channel_sync_get_ref(struct nvgpu_channel_sync *s)
{
	nvgpu_atomic_inc(&s->refcount);
}

/* Returns true when the last reference was dropped. */
bool nvgpu_channel_sync_put_ref_and_check(struct nvgpu_channel_sync *s)
{
	return nvgpu_atomic_dec_and_test(&s->refcount);
}
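
/*
 * Usage sketch: a minimal, hypothetical caller of this abstraction.
 * The channel (ch), command buffer entry (cmd) and fence (fence) are
 * assumed to come from the wider driver; names and error paths here are
 * illustrative only, not taken from this file.
 *
 *	struct nvgpu_channel_sync *sync;
 *	int err;
 *
 *	sync = nvgpu_channel_sync_create(ch, false);
 *	if (sync == NULL)
 *		return -ENOMEM;
 *
 *	nvgpu_channel_sync_get_ref(sync);
 *
 *	// Emit an increment after the submitted work completes.
 *	err = nvgpu_channel_sync_incr(sync, cmd, fence, true, true);
 *
 *	// Drop the reference; tear down on the last put, forcing the
 *	// safe state first so pending waiters are released.
 *	if (nvgpu_channel_sync_put_ref_and_check(sync))
 *		nvgpu_channel_sync_destroy(sync, true);
 */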