Open source GPL/LGPL release

This commit is contained in:
svcmobrel-release
2025-12-19 15:25:44 -08:00
commit 9fc87a7ec7
2261 changed files with 576825 additions and 0 deletions


@@ -0,0 +1,104 @@
/*
* GK20A Channel Synchronization Abstraction
*
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/atomic.h>
#include <nvgpu/bug.h>
#include <nvgpu/list.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/os_fence.h>
#include <nvgpu/os_fence_syncpts.h>
#include <nvgpu/os_fence_semas.h>
#include <nvgpu/channel.h>
#include <nvgpu/channel_sync.h>
#include <nvgpu/channel_sync_syncpt.h>
#include <nvgpu/channel_sync_semaphore.h>
#include <nvgpu/fence.h>
#include "channel_sync_priv.h"
struct nvgpu_channel_sync *nvgpu_channel_sync_create(struct nvgpu_channel *c)
{
if (nvgpu_has_syncpoints(c->g)) {
return nvgpu_channel_sync_syncpt_create(c);
} else {
#ifdef CONFIG_NVGPU_SW_SEMAPHORE
return nvgpu_channel_sync_semaphore_create(c);
#else
return NULL;
#endif
}
}
bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g)
{
return !nvgpu_has_syncpoints(g);
}
int nvgpu_channel_sync_wait_fence_fd(struct nvgpu_channel_sync *s, int fd,
struct priv_cmd_entry **entry, u32 max_wait_cmds)
{
return s->ops->wait_fence_fd(s, fd, entry, max_wait_cmds);
}
int nvgpu_channel_sync_incr(struct nvgpu_channel_sync *s,
struct priv_cmd_entry **entry, struct nvgpu_fence_type *fence,
bool need_sync_fence)
{
return s->ops->incr(s, entry, fence, need_sync_fence);
}
int nvgpu_channel_sync_incr_user(struct nvgpu_channel_sync *s,
struct priv_cmd_entry **entry, struct nvgpu_fence_type *fence,
bool wfi, bool need_sync_fence)
{
return s->ops->incr_user(s, entry, fence, wfi, need_sync_fence);
}
void nvgpu_channel_sync_mark_progress(struct nvgpu_channel_sync *s,
bool register_irq)
{
s->ops->mark_progress(s, register_irq);
}
void nvgpu_channel_sync_set_min_eq_max(struct nvgpu_channel_sync *s)
{
s->ops->set_min_eq_max(s);
}
void nvgpu_channel_sync_get_ref(struct nvgpu_channel_sync *s)
{
nvgpu_atomic_inc(&s->refcount);
}
bool nvgpu_channel_sync_put_ref_and_check(struct nvgpu_channel_sync *s)
{
return nvgpu_atomic_dec_and_test(&s->refcount);
}
void nvgpu_channel_sync_destroy(struct nvgpu_channel_sync *sync)
{
sync->ops->destroy(sync);
}
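
For context, a minimal sketch of the reference-counted lifecycle these wrappers imply; the calling context is hypothetical and error handling is elided.

static int example_sync_lifecycle(struct nvgpu_channel *c)
{
	struct nvgpu_channel_sync *sync = nvgpu_channel_sync_create(c);

	if (sync == NULL) {
		return -ENOMEM;
	}

	/* each user of the sync object takes a reference... */
	nvgpu_channel_sync_get_ref(sync);

	/* ...and whoever drops the last one tears it down */
	if (nvgpu_channel_sync_put_ref_and_check(sync)) {
		nvgpu_channel_sync_destroy(sync);
	}
	return 0;
}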


@@ -0,0 +1,83 @@
/*
* Nvgpu Channel Synchronization Abstraction
*
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_CHANNEL_SYNC_PRIV_H
#define NVGPU_CHANNEL_SYNC_PRIV_H
/*
 * These APIs are used for job synchronization that the driver knows
 * about. If submits happen only in userspace, none of this is needed
 * and it is compiled out; the ifdef below is a safeguard.
 */
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
#include <nvgpu/atomic.h>
#include <nvgpu/types.h>
struct priv_cmd_entry;
struct nvgpu_fence_type;
struct nvgpu_channel_sync_ops;
/*
* This struct is private and should not be used directly. Users should
* instead use the public APIs starting with nvgpu_channel_sync_*
*/
struct nvgpu_channel_sync {
nvgpu_atomic_t refcount;
const struct nvgpu_channel_sync_ops *ops;
};
/*
* This struct is private and should not be used directly. Users should
* instead use the public APIs starting with nvgpu_channel_sync_*
*/
struct nvgpu_channel_sync_ops {
int (*wait_fence_raw)(struct nvgpu_channel_sync *s, u32 id, u32 thresh,
struct priv_cmd_entry **entry);
int (*wait_fence_fd)(struct nvgpu_channel_sync *s, int fd,
struct priv_cmd_entry **entry, u32 max_wait_cmds);
int (*incr)(struct nvgpu_channel_sync *s,
struct priv_cmd_entry **entry,
struct nvgpu_fence_type *fence,
bool need_sync_fence);
int (*incr_user)(struct nvgpu_channel_sync *s,
struct priv_cmd_entry **entry,
struct nvgpu_fence_type *fence,
bool wfi,
bool need_sync_fence);
void (*mark_progress)(struct nvgpu_channel_sync *s,
bool register_irq);
void (*set_min_eq_max)(struct nvgpu_channel_sync *s);
void (*destroy)(struct nvgpu_channel_sync *s);
};
#endif /* CONFIG_NVGPU_KERNEL_MODE_SUBMIT */
#endif /* NVGPU_CHANNEL_SYNC_PRIV_H */
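
For orientation, a minimal sketch (names illustrative, not nvgpu's) of the backend pattern the semaphore and syncpoint implementations below follow: embed the base struct, recover the container via offsetof, and point base.ops at a static table.

struct example_sync {
	struct nvgpu_channel_sync base;
	/* backend-private state would live here */
};

static void example_sync_destroy(struct nvgpu_channel_sync *s)
{
	struct example_sync *es = (struct example_sync *)
		((uintptr_t)s - offsetof(struct example_sync, base));

	/* free es and any backend resources here */
	(void)es;
}

static const struct nvgpu_channel_sync_ops example_sync_ops = {
	.destroy = example_sync_destroy,
	/* a real backend fills in the remaining hooks */
};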


@@ -0,0 +1,389 @@
/*
* GK20A Channel Synchronization Abstraction
*
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/atomic.h>
#include <nvgpu/bug.h>
#include <nvgpu/list.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/semaphore.h>
#include <nvgpu/os_fence.h>
#include <nvgpu/os_fence_semas.h>
#include <nvgpu/channel.h>
#include <nvgpu/channel_sync.h>
#include <nvgpu/channel_sync_semaphore.h>
#include <nvgpu/priv_cmdbuf.h>
#include <nvgpu/fence.h>
#include <nvgpu/fence_sema.h>
#include "channel_sync_priv.h"
struct nvgpu_channel_sync_semaphore {
struct nvgpu_channel_sync base;
struct nvgpu_channel *c;
struct nvgpu_hw_semaphore *hw_sema;
};
static struct nvgpu_channel_sync_semaphore *
nvgpu_channel_sync_semaphore_from_base(struct nvgpu_channel_sync *base)
{
return (struct nvgpu_channel_sync_semaphore *)
((uintptr_t)base -
offsetof(struct nvgpu_channel_sync_semaphore, base));
}
#ifndef CONFIG_NVGPU_SYNCFD_NONE
static void add_sema_wait_cmd(struct gk20a *g, struct nvgpu_channel *c,
struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd)
{
int ch = c->chid;
u64 va;
/* acquire just needs to read the mem. */
va = nvgpu_semaphore_gpu_ro_va(s);
g->ops.sync.sema.add_wait_cmd(g, cmd, s, va);
gpu_sema_verbose_dbg(g, "(A) c=%d ACQ_GE %-4u pool=%-3llu "
"va=0x%llx cmd=%p",
ch, nvgpu_semaphore_get_value(s),
nvgpu_semaphore_get_hw_pool_page_idx(s),
va, cmd);
}
static void channel_sync_semaphore_gen_wait_cmd(struct nvgpu_channel *c,
struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd,
u32 wait_cmd_size)
{
bool has_incremented;
if (sema == NULL) {
/* came from an expired sync fence */
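/* the zeros fill this fence's slot in the caller's
 * wait_cmd_size * num_fences allocation; they are assumed
 * to execute as GPU no-ops */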
nvgpu_priv_cmdbuf_append_zeros(c->g, wait_cmd, wait_cmd_size);
} else {
has_incremented = nvgpu_semaphore_can_wait(sema);
nvgpu_assert(has_incremented);
add_sema_wait_cmd(c->g, c, sema, wait_cmd);
nvgpu_semaphore_put(sema);
}
}
#endif /* CONFIG_NVGPU_SYNCFD_NONE */
static void add_sema_incr_cmd(struct gk20a *g, struct nvgpu_channel *c,
struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd,
bool wfi, struct nvgpu_hw_semaphore *hw_sema)
{
int ch = c->chid;
u64 va;
/* release will need to write back to the semaphore memory. */
va = nvgpu_semaphore_gpu_rw_va(s);
/* find the right sema next_value to write (like syncpt's max). */
nvgpu_semaphore_prepare(s, hw_sema);
g->ops.sync.sema.add_incr_cmd(g, cmd, s, va, wfi);
gpu_sema_verbose_dbg(g, "(R) c=%d INCR %u (%u) pool=%-3llu "
"va=0x%llx entry=%p",
ch, nvgpu_semaphore_get_value(s),
nvgpu_semaphore_read(s),
nvgpu_semaphore_get_hw_pool_page_idx(s),
va, cmd);
}
static int channel_sync_semaphore_wait_fd(
struct nvgpu_channel_sync *s, int fd,
struct priv_cmd_entry **entry, u32 max_wait_cmds)
{
#ifndef CONFIG_NVGPU_SYNCFD_NONE
struct nvgpu_channel_sync_semaphore *sema =
nvgpu_channel_sync_semaphore_from_base(s);
struct nvgpu_channel *c = sema->c;
struct nvgpu_os_fence os_fence = {0};
struct nvgpu_os_fence_sema os_fence_sema = {0};
int err;
u32 wait_cmd_size, i, num_fences;
struct nvgpu_semaphore *semaphore = NULL;
err = nvgpu_os_fence_fdget(&os_fence, c, fd);
if (err != 0) {
return err;
}
err = nvgpu_os_fence_get_semas(&os_fence_sema, &os_fence);
if (err != 0) {
goto cleanup;
}
num_fences = nvgpu_os_fence_sema_get_num_semaphores(&os_fence_sema);
if (num_fences == 0U) {
goto cleanup;
}
if ((max_wait_cmds != 0U) && (num_fences > max_wait_cmds)) {
err = -EINVAL;
goto cleanup;
}
wait_cmd_size = c->g->ops.sync.sema.get_wait_cmd_size();
err = nvgpu_priv_cmdbuf_alloc(c->priv_cmd_q,
wait_cmd_size * num_fences, entry);
if (err != 0) {
goto cleanup;
}
for (i = 0; i < num_fences; i++) {
nvgpu_os_fence_sema_extract_nth_semaphore(
&os_fence_sema, i, &semaphore);
channel_sync_semaphore_gen_wait_cmd(c, semaphore, *entry,
wait_cmd_size);
}
cleanup:
os_fence.ops->drop_ref(&os_fence);
return err;
#else
struct nvgpu_channel_sync_semaphore *sema =
nvgpu_channel_sync_semaphore_from_base(s);
nvgpu_err(sema->c->g,
"trying to use sync fds with CONFIG_NVGPU_SYNCFD_NONE");
return -ENODEV;
#endif
}
static int channel_sync_semaphore_incr_common(
struct nvgpu_channel_sync *s, bool wfi_cmd,
struct priv_cmd_entry **incr_cmd,
struct nvgpu_fence_type *fence,
bool need_sync_fence)
{
u32 incr_cmd_size;
struct nvgpu_channel_sync_semaphore *sp =
nvgpu_channel_sync_semaphore_from_base(s);
struct nvgpu_channel *c = sp->c;
struct nvgpu_semaphore *semaphore;
int err = 0;
struct nvgpu_os_fence os_fence = {0};
semaphore = nvgpu_semaphore_alloc(sp->hw_sema);
if (semaphore == NULL) {
nvgpu_err(c->g,
"ran out of semaphores");
return -ENOMEM;
}
incr_cmd_size = c->g->ops.sync.sema.get_incr_cmd_size();
err = nvgpu_priv_cmdbuf_alloc(c->priv_cmd_q, incr_cmd_size, incr_cmd);
if (err != 0) {
goto clean_up_sema;
}
/* Release the completion semaphore. */
add_sema_incr_cmd(c->g, c, semaphore, *incr_cmd, wfi_cmd, sp->hw_sema);
if (need_sync_fence) {
err = nvgpu_os_fence_sema_create(&os_fence, c, semaphore);
if (err != 0) {
goto clean_up_cmdbuf;
}
}
nvgpu_fence_from_semaphore(fence, semaphore, &c->semaphore_wq, os_fence);
return 0;
clean_up_cmdbuf:
nvgpu_priv_cmdbuf_rollback(c->priv_cmd_q, *incr_cmd);
clean_up_sema:
nvgpu_semaphore_put(semaphore);
return err;
}
static int channel_sync_semaphore_incr(
struct nvgpu_channel_sync *s,
struct priv_cmd_entry **entry,
struct nvgpu_fence_type *fence,
bool need_sync_fence)
{
/* Don't add a WFI cmd to this one since we're not returning
 * a fence to user space. */
return channel_sync_semaphore_incr_common(s,
false /* no wfi */,
entry, fence, need_sync_fence);
}
static int channel_sync_semaphore_incr_user(
struct nvgpu_channel_sync *s,
struct priv_cmd_entry **entry,
struct nvgpu_fence_type *fence,
bool wfi,
bool need_sync_fence)
{
#ifndef CONFIG_NVGPU_SYNCFD_NONE
return channel_sync_semaphore_incr_common(s, wfi, entry, fence,
need_sync_fence);
#else
struct nvgpu_channel_sync_semaphore *sema =
nvgpu_channel_sync_semaphore_from_base(s);
nvgpu_err(sema->c->g,
"trying to use sync fds with CONFIG_NVGPU_SYNCFD_NONE");
return -ENODEV;
#endif
}
static void channel_sync_semaphore_mark_progress(struct nvgpu_channel_sync *s,
bool register_irq)
{
struct nvgpu_channel_sync_semaphore *sp =
nvgpu_channel_sync_semaphore_from_base(s);
(void)nvgpu_hw_semaphore_update_next(sp->hw_sema);
/*
* register_irq is ignored: there is only one semaphore interrupt that
* triggers nvgpu_channel_update() and it's always active.
*/
}
static void channel_sync_semaphore_set_min_eq_max(struct nvgpu_channel_sync *s)
{
struct nvgpu_channel_sync_semaphore *sp =
nvgpu_channel_sync_semaphore_from_base(s);
struct nvgpu_channel *c = sp->c;
bool updated;
updated = nvgpu_hw_semaphore_reset(sp->hw_sema);
if (updated) {
nvgpu_cond_broadcast_interruptible(&c->semaphore_wq);
}
}
static void channel_sync_semaphore_destroy(struct nvgpu_channel_sync *s)
{
struct nvgpu_channel_sync_semaphore *sema =
nvgpu_channel_sync_semaphore_from_base(s);
struct nvgpu_channel *c = sema->c;
struct gk20a *g = c->g;
if (c->has_os_fence_framework_support &&
g->os_channel.os_fence_framework_inst_exists(c)) {
g->os_channel.destroy_os_fence_framework(c);
}
nvgpu_hw_semaphore_free(sema->hw_sema);
nvgpu_kfree(g, sema);
}
static const struct nvgpu_channel_sync_ops channel_sync_semaphore_ops = {
.wait_fence_fd = channel_sync_semaphore_wait_fd,
.incr = channel_sync_semaphore_incr,
.incr_user = channel_sync_semaphore_incr_user,
.mark_progress = channel_sync_semaphore_mark_progress,
.set_min_eq_max = channel_sync_semaphore_set_min_eq_max,
.destroy = channel_sync_semaphore_destroy,
};
/* Converts a valid struct nvgpu_channel_sync ptr to a
 * struct nvgpu_channel_sync_semaphore ptr, else returns NULL.
 */
struct nvgpu_channel_sync_semaphore *
nvgpu_channel_sync_to_semaphore(struct nvgpu_channel_sync *sync)
{
struct nvgpu_channel_sync_semaphore *sema = NULL;
if (sync->ops == &channel_sync_semaphore_ops) {
sema = nvgpu_channel_sync_semaphore_from_base(sync);
}
return sema;
}
struct nvgpu_hw_semaphore *
nvgpu_channel_sync_semaphore_hw_sema(struct nvgpu_channel_sync_semaphore *sema)
{
return sema->hw_sema;
}
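
Illustrative only: how a caller might use the checked downcast above; the surrounding context is hypothetical. The ops-pointer comparison makes this safe on any nvgpu_channel_sync and yields NULL for syncpoint-backed objects.

static struct nvgpu_hw_semaphore *
example_get_hw_sema(struct nvgpu_channel_sync *sync)
{
	struct nvgpu_channel_sync_semaphore *sema =
		nvgpu_channel_sync_to_semaphore(sync);

	return (sema != NULL) ?
		nvgpu_channel_sync_semaphore_hw_sema(sema) : NULL;
}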
struct nvgpu_channel_sync *
nvgpu_channel_sync_semaphore_create(struct nvgpu_channel *c)
{
struct nvgpu_channel_sync_semaphore *sema;
struct gk20a *g = c->g;
int asid = -1;
int err;
if (c->vm == NULL) {
nvgpu_do_assert();
return NULL;
}
sema = nvgpu_kzalloc(c->g, sizeof(*sema));
if (sema == NULL) {
return NULL;
}
sema->c = c;
err = nvgpu_hw_semaphore_init(c->vm, c->chid, &sema->hw_sema);
if (err != 0) {
goto err_free_sema;
}
if (c->vm->as_share != NULL) {
asid = c->vm->as_share->id;
}
if (c->has_os_fence_framework_support) {
/* Init the sync_timeline for this channel. */
err = g->os_channel.init_os_fence_framework(c,
"gk20a_ch%d_as%d", c->chid, asid);
if (err != 0) {
goto err_free_hw_sema;
}
}
nvgpu_atomic_set(&sema->base.refcount, 0);
sema->base.ops = &channel_sync_semaphore_ops;
return &sema->base;
err_free_hw_sema:
nvgpu_hw_semaphore_free(sema->hw_sema);
err_free_sema:
nvgpu_kfree(g, sema);
return NULL;
}


@@ -0,0 +1,419 @@
/*
* GK20A Channel Synchronization Abstraction
*
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#if !defined(CONFIG_NVGPU_SYNCFD_NONE) && !defined(CONFIG_TEGRA_GK20A_NVHOST_HOST1X)
#include <uapi/linux/nvhost_ioctl.h>
#endif
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/errata.h>
#include <nvgpu/atomic.h>
#include <nvgpu/bug.h>
#include <nvgpu/list.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/os_fence.h>
#include <nvgpu/os_fence_syncpts.h>
#include <nvgpu/channel.h>
#include <nvgpu/channel_sync.h>
#include <nvgpu/channel_sync_syncpt.h>
#include <nvgpu/priv_cmdbuf.h>
#include <nvgpu/fence.h>
#include <nvgpu/fence_syncpt.h>
#include <nvgpu/string.h>
#include "channel_sync_priv.h"
struct nvgpu_channel_sync_syncpt {
struct nvgpu_channel_sync base;
struct nvgpu_channel *c;
struct nvgpu_nvhost_dev *nvhost;
u32 id;
struct nvgpu_mem syncpt_buf;
u32 max_thresh;
};
static struct nvgpu_channel_sync_syncpt *
nvgpu_channel_sync_syncpt_from_base(struct nvgpu_channel_sync *base)
{
return (struct nvgpu_channel_sync_syncpt *)
((uintptr_t)base -
offsetof(struct nvgpu_channel_sync_syncpt, base));
}
static void channel_sync_syncpt_gen_wait_cmd(struct nvgpu_channel *c,
u32 id, u32 thresh, struct priv_cmd_entry *wait_cmd)
{
nvgpu_log(c->g, gpu_dbg_info, "sp->id %d gpu va %llx",
id, c->vm->syncpt_ro_map_gpu_va);
c->g->ops.sync.syncpt.add_wait_cmd(c->g, wait_cmd, id, thresh,
c->vm->syncpt_ro_map_gpu_va);
}
static int channel_sync_syncpt_wait_raw(struct nvgpu_channel_sync_syncpt *s,
u32 id, u32 thresh, struct priv_cmd_entry **wait_cmd)
{
struct nvgpu_channel *c = s->c;
int err = 0;
u32 wait_cmd_size = c->g->ops.sync.syncpt.get_wait_cmd_size();
if (!nvgpu_nvhost_syncpt_is_valid_pt_ext(s->nvhost, id)) {
return -EINVAL;
}
err = nvgpu_priv_cmdbuf_alloc(c->priv_cmd_q, wait_cmd_size, wait_cmd);
if (err != 0) {
return err;
}
channel_sync_syncpt_gen_wait_cmd(c, id, thresh, *wait_cmd);
return 0;
}
#ifndef CONFIG_NVGPU_SYNCFD_NONE
struct gen_wait_cmd_iter_data {
struct nvgpu_channel *c;
struct priv_cmd_entry *wait_cmd;
};
static int gen_wait_cmd_iter(struct nvhost_ctrl_sync_fence_info info, void *d)
{
struct gen_wait_cmd_iter_data *data = d;
channel_sync_syncpt_gen_wait_cmd(data->c, info.id, info.thresh,
data->wait_cmd);
return 0;
}
static int channel_sync_syncpt_wait_fd(struct nvgpu_channel_sync *s, int fd,
struct priv_cmd_entry **wait_cmd, u32 max_wait_cmds)
{
struct nvgpu_os_fence os_fence = {0};
struct nvgpu_os_fence_syncpt os_fence_syncpt = {0};
struct nvgpu_channel_sync_syncpt *sp =
nvgpu_channel_sync_syncpt_from_base(s);
struct nvgpu_channel *c = sp->c;
struct gen_wait_cmd_iter_data iter_data = {
.c = c
};
u32 num_fences, wait_cmd_size;
int err = 0;
err = nvgpu_os_fence_fdget(&os_fence, c, fd);
if (err != 0) {
return -EINVAL;
}
err = nvgpu_os_fence_get_syncpts(&os_fence_syncpt, &os_fence);
if (err != 0) {
goto cleanup;
}
num_fences = nvgpu_os_fence_syncpt_get_num_syncpoints(&os_fence_syncpt);
if (num_fences == 0U) {
goto cleanup;
}
if ((max_wait_cmds != 0U) && (num_fences > max_wait_cmds)) {
err = -EINVAL;
goto cleanup;
}
wait_cmd_size = c->g->ops.sync.syncpt.get_wait_cmd_size();
err = nvgpu_priv_cmdbuf_alloc(c->priv_cmd_q,
wait_cmd_size * num_fences, wait_cmd);
if (err != 0) {
goto cleanup;
}
iter_data.wait_cmd = *wait_cmd;
nvgpu_os_fence_syncpt_foreach_pt(&os_fence_syncpt,
gen_wait_cmd_iter, &iter_data);
cleanup:
os_fence.ops->drop_ref(&os_fence);
return err;
}
#else /* CONFIG_NVGPU_SYNCFD_NONE */
static int channel_sync_syncpt_wait_fd(struct nvgpu_channel_sync *s, int fd,
struct priv_cmd_entry **wait_cmd, u32 max_wait_cmds)
{
struct nvgpu_channel_sync_syncpt *sp =
nvgpu_channel_sync_syncpt_from_base(s);
nvgpu_err(sp->c->g,
"trying to use sync fds with CONFIG_NVGPU_SYNCFD_NONE");
return -ENODEV;
}
#endif /* CONFIG_NVGPU_SYNCFD_NONE */
static void channel_sync_syncpt_update(void *priv, int nr_completed)
{
struct nvgpu_channel *ch = priv;
nvgpu_channel_update(ch);
/* note: the matching nvgpu_channel_get() is in channel_sync_syncpt_mark_progress() */
nvgpu_channel_put(ch);
}
static int channel_sync_syncpt_incr_common(struct nvgpu_channel_sync *s,
bool wfi_cmd,
struct priv_cmd_entry **incr_cmd,
struct nvgpu_fence_type *fence,
bool need_sync_fence)
{
u32 thresh;
int err;
struct nvgpu_channel_sync_syncpt *sp =
nvgpu_channel_sync_syncpt_from_base(s);
struct nvgpu_channel *c = sp->c;
struct nvgpu_os_fence os_fence = {0};
struct gk20a *g = c->g;
err = nvgpu_priv_cmdbuf_alloc(c->priv_cmd_q,
g->ops.sync.syncpt.get_incr_cmd_size(wfi_cmd),
incr_cmd);
if (err != 0) {
return err;
}
nvgpu_log(g, gpu_dbg_info, "sp->id %d gpu va %llx",
sp->id, sp->syncpt_buf.gpu_va);
g->ops.sync.syncpt.add_incr_cmd(g, *incr_cmd,
sp->id, sp->syncpt_buf.gpu_va, wfi_cmd);
thresh = nvgpu_wrapping_add_u32(sp->max_thresh,
g->ops.sync.syncpt.get_incr_per_release());
if (need_sync_fence) {
err = nvgpu_os_fence_syncpt_create(&os_fence, c, sp->nvhost,
sp->id, thresh);
if (err != 0) {
goto clean_up_priv_cmd;
}
}
nvgpu_fence_from_syncpt(fence, sp->nvhost, sp->id, thresh, os_fence);
return 0;
clean_up_priv_cmd:
nvgpu_priv_cmdbuf_rollback(c->priv_cmd_q, *incr_cmd);
return err;
}
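
The threshold above advances by get_incr_per_release() with explicit wraparound. A hedged sketch of what nvgpu_wrapping_add_u32 is assumed to do (this is not nvgpu's source; plain u32 addition already wraps, and the helper presumably exists to make that intent explicit for safety analysis):

static inline u32 example_wrapping_add_u32(u32 a, u32 b)
{
	/* modulo-2^32 addition, so syncpoint thresholds stay
	 * meaningful after the 32-bit counter wraps */
	return (u32)(((u64)a + (u64)b) & 0xffffffffULL);
}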
static int channel_sync_syncpt_incr(struct nvgpu_channel_sync *s,
struct priv_cmd_entry **entry,
struct nvgpu_fence_type *fence,
bool need_sync_fence)
{
/* Don't add a WFI cmd to this one since we're not returning
 * a fence to user space. */
return channel_sync_syncpt_incr_common(s, false, entry, fence,
need_sync_fence);
}
static int channel_sync_syncpt_incr_user(struct nvgpu_channel_sync *s,
struct priv_cmd_entry **entry,
struct nvgpu_fence_type *fence,
bool wfi,
bool need_sync_fence)
{
/* Need to do 'wfi + host incr' since we return the fence
* to user space. */
return channel_sync_syncpt_incr_common(s, wfi, entry, fence,
need_sync_fence);
}
static void channel_sync_syncpt_mark_progress(struct nvgpu_channel_sync *s,
bool register_irq)
{
struct nvgpu_channel_sync_syncpt *sp =
nvgpu_channel_sync_syncpt_from_base(s);
struct nvgpu_channel *c = sp->c;
struct gk20a *g = c->g;
sp->max_thresh = nvgpu_wrapping_add_u32(sp->max_thresh,
g->ops.sync.syncpt.get_incr_per_release());
if (register_irq) {
struct nvgpu_channel *referenced = nvgpu_channel_get(c);
WARN_ON(referenced == NULL);
if (referenced != NULL) {
/*
* note: the matching channel_put() is in
* channel_sync_syncpt_update() that gets called when
* the job completes.
*/
int err = nvgpu_nvhost_intr_register_notifier(
sp->nvhost,
sp->id, sp->max_thresh,
channel_sync_syncpt_update, c);
if (err != 0) {
nvgpu_channel_put(referenced);
}
/*
* This never fails in practice. If it does, we won't
* be getting a completion signal to free the job
* resources, but maybe this succeeds on a possible
* subsequent submit, and the channel closure path will
* eventually mark everything completed anyway.
*/
WARN(err != 0,
"failed to set submit complete interrupt");
}
}
}
int nvgpu_channel_sync_wait_syncpt(struct nvgpu_channel_sync_syncpt *s,
u32 id, u32 thresh, struct priv_cmd_entry **entry)
{
return channel_sync_syncpt_wait_raw(s, id, thresh, entry);
}
static void channel_sync_syncpt_set_min_eq_max(struct nvgpu_channel_sync *s)
{
struct nvgpu_channel_sync_syncpt *sp =
nvgpu_channel_sync_syncpt_from_base(s);
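/* force the syncpoint to appear fully completed (min = tracked max)
 * so any outstanding waiters are released */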
nvgpu_nvhost_syncpt_set_minval(sp->nvhost, sp->id, sp->max_thresh);
}
static u32 channel_sync_syncpt_get_id(struct nvgpu_channel_sync_syncpt *sp)
{
return sp->id;
}
static void channel_sync_syncpt_destroy(struct nvgpu_channel_sync *s)
{
struct nvgpu_channel_sync_syncpt *sp =
nvgpu_channel_sync_syncpt_from_base(s);
sp->c->g->ops.sync.syncpt.free_buf(sp->c, &sp->syncpt_buf);
channel_sync_syncpt_set_min_eq_max(s);
nvgpu_nvhost_syncpt_put_ref_ext(sp->nvhost, sp->id);
nvgpu_kfree(sp->c->g, sp);
}
u32 nvgpu_channel_sync_get_syncpt_id(struct nvgpu_channel_sync_syncpt *s)
{
return channel_sync_syncpt_get_id(s);
}
static const struct nvgpu_channel_sync_ops channel_sync_syncpt_ops = {
.wait_fence_fd = channel_sync_syncpt_wait_fd,
.incr = channel_sync_syncpt_incr,
.incr_user = channel_sync_syncpt_incr_user,
.mark_progress = channel_sync_syncpt_mark_progress,
.set_min_eq_max = channel_sync_syncpt_set_min_eq_max,
.destroy = channel_sync_syncpt_destroy,
};
struct nvgpu_channel_sync_syncpt *
nvgpu_channel_sync_to_syncpt(struct nvgpu_channel_sync *sync)
{
struct nvgpu_channel_sync_syncpt *syncpt = NULL;
if (sync->ops == &channel_sync_syncpt_ops) {
syncpt = nvgpu_channel_sync_syncpt_from_base(sync);
}
return syncpt;
}
struct nvgpu_channel_sync *
nvgpu_channel_sync_syncpt_create(struct nvgpu_channel *c)
{
struct nvgpu_channel_sync_syncpt *sp;
char syncpt_name[32];
int err;
sp = nvgpu_kzalloc(c->g, sizeof(*sp));
if (sp == NULL) {
return NULL;
}
sp->c = c;
sp->nvhost = c->g->nvhost;
snprintf(syncpt_name, sizeof(syncpt_name),
"%s_%d", c->g->name, c->chid);
sp->id = nvgpu_nvhost_get_syncpt_client_managed(sp->nvhost,
syncpt_name);
/*
 * This is a fix to handle an invalid syncpt value. Once nvhost
 * updates the return value to NVGPU_INVALID_SYNCPT_ID, we can
 * remove the zero check.
 */
if ((nvgpu_is_errata_present(c->g, NVGPU_ERRATA_SYNCPT_INVALID_ID_0)) &&
(sp->id == 0U)) {
nvgpu_err(c->g, "failed to get free syncpt");
goto err_free;
}
if (sp->id == NVGPU_INVALID_SYNCPT_ID) {
nvgpu_err(c->g, "failed to get free syncpt");
goto err_free;
}
err = sp->c->g->ops.sync.syncpt.alloc_buf(sp->c, sp->id,
&sp->syncpt_buf);
if (err != 0) {
nvgpu_err(c->g, "failed to allocate syncpoint buffer");
goto err_put;
}
err = nvgpu_nvhost_syncpt_read_ext_check(sp->nvhost, sp->id,
&sp->max_thresh);
if (err != 0) {
goto err_free_buf;
}
nvgpu_atomic_set(&sp->base.refcount, 0);
sp->base.ops = &channel_sync_syncpt_ops;
return &sp->base;
err_free_buf:
sp->c->g->ops.sync.syncpt.free_buf(sp->c, &sp->syncpt_buf);
err_put:
nvgpu_nvhost_syncpt_put_ref_ext(sp->nvhost, sp->id);
err_free:
nvgpu_kfree(c->g, sp);
return NULL;
}
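
Putting the pieces together: a hedged sketch of how a kernel-mode submit path might drive either backend through the public wrappers from channel_sync.c. The job plumbing is elided and the flow is inferred from the API, not copied from nvgpu's submit code.

static int example_submit_sync(struct nvgpu_channel_sync *s, int prefence_fd,
		struct nvgpu_fence_type *fence)
{
	struct priv_cmd_entry *wait_cmd = NULL;
	struct priv_cmd_entry *incr_cmd = NULL;
	int err;

	/* 1) turn the incoming sync fd into GPU wait commands
	 * (max_wait_cmds == 0 means no limit) */
	err = nvgpu_channel_sync_wait_fence_fd(s, prefence_fd, &wait_cmd, 0U);
	if (err != 0) {
		return err;
	}

	/* 2) emit a completion increment and back a user-visible
	 * post-fence with it (WFI + sync fence since it goes to
	 * user space) */
	err = nvgpu_channel_sync_incr_user(s, &incr_cmd, fence, true, true);
	if (err != 0) {
		return err;
	}

	/* wait_cmd and incr_cmd would be appended to the job here */

	/* 3) once the job is queued, advance the tracked max threshold
	 * and arm the completion interrupt */
	nvgpu_channel_sync_mark_progress(s, true);
	return 0;
}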


@@ -0,0 +1,144 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/channel.h>
#include <nvgpu/channel_user_syncpt.h>
#include <nvgpu/string.h>
#include <nvgpu/errata.h>
#include "channel_user_syncpt_priv.h"
static int user_sync_build_debug_name(struct nvgpu_channel *ch,
char *buf, size_t capacity)
{
struct gk20a *g = ch->g;
int n;
(void)strncpy(buf, g->name, capacity);
capacity = nvgpu_safe_sub_u64(capacity, strlen(g->name));
(void)strncat(buf, "_", capacity);
capacity = nvgpu_safe_sub_u64(capacity, strlen("_"));
/*
* however, nvgpu_strnadd_u32 expects capacity to include the
* terminating byte, so add it back
*/
capacity = nvgpu_safe_add_u64(capacity, 1);
n = nvgpu_strnadd_u32(&buf[strlen(buf)], ch->chid,
capacity, 10);
if (n == 0) {
nvgpu_err(g, "strnadd failed!");
return -EINVAL;
}
capacity = nvgpu_safe_sub_u64(capacity, nvgpu_safe_cast_s32_to_u64(n));
/* nul byte */
capacity = nvgpu_safe_sub_u64(capacity, 1UL);
(void)strncat(buf, "_user", capacity);
/* make sure "_user" was not truncated: the safe sub traps on underflow */
capacity = nvgpu_safe_sub_u64(capacity, strlen("_user"));
return 0;
}
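/*
 * Worked example of the capacity accounting above (values hypothetical):
 * g->name "gp10b", chid 42, capacity 31 (SYNCPT_NAME_SZ - 1).
 * strncpy "gp10b"  -> capacity 26
 * strncat "_"      -> capacity 25, plus 1 for strnadd's nul -> 26
 * strnadd "42"     -> n = 2, capacity 24, minus nul byte -> 23
 * strncat "_user"  -> capacity 18, buf = "gp10b_42_user"
 * Any step that would underflow capacity traps in nvgpu_safe_sub_u64,
 * which is how truncation is caught.
 */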
struct nvgpu_channel_user_syncpt *
nvgpu_channel_user_syncpt_create(struct nvgpu_channel *ch)
{
struct gk20a *g = ch->g;
struct nvgpu_channel_user_syncpt *s;
char syncpt_name[SYNCPT_NAME_SZ] = {0}; /* e.g. gp10b_42_user */
int err;
s = nvgpu_kzalloc(ch->g, sizeof(*s));
if (s == NULL) {
return NULL;
}
s->ch = ch;
s->nvhost = g->nvhost;
err = user_sync_build_debug_name(ch, syncpt_name,
SYNCPT_NAME_SZ - 1UL);
if (err < 0) {
goto err_free;
}
s->syncpt_id = nvgpu_nvhost_get_syncpt_client_managed(s->nvhost,
syncpt_name);
/*
 * This is a fix to handle an invalid syncpt value. Once nvhost
 * updates the return value to NVGPU_INVALID_SYNCPT_ID, we can
 * remove the zero check.
 */
if ((nvgpu_is_errata_present(g, NVGPU_ERRATA_SYNCPT_INVALID_ID_0)) &&
(s->syncpt_id == 0U)) {
nvgpu_err(g, "failed to get free syncpt");
goto err_free;
}
if (s->syncpt_id == NVGPU_INVALID_SYNCPT_ID) {
nvgpu_err(g, "failed to get free syncpt");
goto err_free;
}
err = g->ops.sync.syncpt.alloc_buf(ch, s->syncpt_id, &s->syncpt_buf);
if (err != 0) {
nvgpu_err(g, "failed to allocate syncpoint buffer");
goto err_put;
}
return s;
err_put:
nvgpu_nvhost_syncpt_put_ref_ext(s->nvhost, s->syncpt_id);
err_free:
nvgpu_kfree(g, s);
return NULL;
}
u32 nvgpu_channel_user_syncpt_get_id(struct nvgpu_channel_user_syncpt *s)
{
return s->syncpt_id;
}
u64 nvgpu_channel_user_syncpt_get_address(struct nvgpu_channel_user_syncpt *s)
{
return s->syncpt_buf.gpu_va;
}
void nvgpu_channel_user_syncpt_set_safe_state(struct nvgpu_channel_user_syncpt *s)
{
nvgpu_nvhost_syncpt_set_safe_state(s->nvhost, s->syncpt_id);
}
void nvgpu_channel_user_syncpt_destroy(struct nvgpu_channel_user_syncpt *s)
{
struct gk20a *g = s->ch->g;
g->ops.sync.syncpt.free_buf(s->ch, &s->syncpt_buf);
nvgpu_nvhost_syncpt_put_ref_ext(s->nvhost, s->syncpt_id);
nvgpu_kfree(g, s);
}
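
A hedged sketch of the lifecycle this API implies; how the id and GPU VA reach userspace is driver plumbing that is not shown here.

static int example_user_syncpt(struct nvgpu_channel *ch)
{
	struct nvgpu_channel_user_syncpt *s;
	u32 id;
	u64 gpu_va;

	s = nvgpu_channel_user_syncpt_create(ch);
	if (s == NULL) {
		return -ENOMEM;
	}

	id = nvgpu_channel_user_syncpt_get_id(s);
	gpu_va = nvgpu_channel_user_syncpt_get_address(s);
	/* ...hand id and gpu_va to userspace here... */
	(void)id;
	(void)gpu_va;

	/* on teardown: park the syncpoint, then free it */
	nvgpu_channel_user_syncpt_set_safe_state(s);
	nvgpu_channel_user_syncpt_destroy(s);
	return 0;
}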


@@ -0,0 +1,42 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef NVGPU_USER_SYNCPT_PRIV_H
#define NVGPU_USER_SYNCPT_PRIV_H
#include <nvgpu/types.h>
#include <nvgpu/nvgpu_mem.h>
struct nvgpu_channel;
struct nvgpu_nvhost_dev;
struct nvgpu_channel_user_syncpt {
struct nvgpu_channel *ch;
struct nvgpu_nvhost_dev *nvhost;
u32 syncpt_id;
struct nvgpu_mem syncpt_buf;
};
#define SYNCPT_NAME_SZ 32UL
#endif /* NVGPU_USER_SYNCPT_PRIV_H */