gpu: nvgpu: add internal CONFIG_SYNC wrapper

The sync file support in Linux has been stabilized and the new config is
called CONFIG_SYNC_FILE. Although perhaps not intended, both the
stabilized version and the legacy CONFIG_SYNC can coexist. To begin
supporting the stabilized version, add CONFIG_NVGPU_SYNCFD_ANDROID and
CONFIG_NVGPU_SYNCFD_NONE as choice configs, of which exactly one will be
set. A later patch will extend this with a choice for CONFIG_SYNC_FILE.

Jira NVGPU-5353

Change-Id: I67582b68d700b16c46e1cd090f1b938067a364e3
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2336118
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Konsta Hölttä
2020-05-04 10:04:39 +03:00
committed by Alex Waterman
parent 068e00749b
commit e5b23f33b9
13 changed files with 84 additions and 43 deletions

View File

@@ -233,3 +233,29 @@ config NVGPU_TEGRA_FUSE
default y
help
Support Tegra fuse
choice
prompt "Supported sync fence backend"
default NVGPU_SYNCFD_ANDROID
depends on GK20A
help
GPU job synchronization (fences before and after submits) can use raw
syncpoints if available and sync fds if chosen. Without syncpoints,
nvgpu also provides semaphore-backed sync fds to userspace.
Select which kernel-provided API is used for sync fds. Matching
support is required for the userspace drivers too.
config NVGPU_SYNCFD_ANDROID
bool "Android SYNC"
depends on SYNC
help
Select CONFIG_SYNC, the legacy synchronization framework provided by
Android (and deprecated in Linux 4.9+ in favor of dma fences and the
stabilized SYNC_FILE).
config NVGPU_SYNCFD_NONE
bool "Nothing"
help
Do not build in support for sync fences.
endchoice

View File

@@ -439,13 +439,13 @@ nvgpu-$(CONFIG_NVGPU_TEGRA_FUSE) += os/linux/fuse.o \
os/linux/soc.o
endif
nvgpu-$(CONFIG_SYNC) += \
nvgpu-$(CONFIG_NVGPU_SYNCFD_ANDROID) += \
os/linux/sync_sema_android.o \
os/linux/os_fence_android.o \
os/linux/os_fence_android_sema.o
ifeq ($(CONFIG_TEGRA_GK20A_NVHOST), y)
nvgpu-$(CONFIG_SYNC) += \
nvgpu-$(CONFIG_NVGPU_SYNCFD_ANDROID) += \
os/linux/os_fence_android_syncpt.o
nvgpu-y += common/sync/channel_sync_syncpt.o
endif

View File

@@ -64,6 +64,9 @@ NVGPU_COMMON_CFLAGS += \
CONFIG_NVGPU_LOGGING := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_LOGGING
CONFIG_NVGPU_SYNCFD_NONE := 1
NVGPU_COMMON_CFLAGS += -DCONFIG_NVGPU_SYNCFD_NONE
ifeq ($(profile),$(filter $(profile),safety_debug safety_release))
# Enable golden context verification only for safety debug/release build

View File

@@ -56,6 +56,7 @@ nvgpu_channel_sync_semaphore_from_base(struct nvgpu_channel_sync *base)
offsetof(struct nvgpu_channel_sync_semaphore, base));
}
#ifndef CONFIG_NVGPU_SYNCFD_NONE
static void add_sema_wait_cmd(struct gk20a *g, struct nvgpu_channel *c,
struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd)
{
@@ -73,6 +74,24 @@ static void add_sema_wait_cmd(struct gk20a *g, struct nvgpu_channel *c,
va, cmd);
}
static void channel_sync_semaphore_gen_wait_cmd(struct nvgpu_channel *c,
struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd,
u32 wait_cmd_size)
{
bool has_incremented;
if (sema == NULL) {
/* came from an expired sync fence */
nvgpu_priv_cmdbuf_append_zeros(c->g, wait_cmd, wait_cmd_size);
} else {
has_incremented = nvgpu_semaphore_can_wait(sema);
nvgpu_assert(has_incremented);
add_sema_wait_cmd(c->g, c, sema, wait_cmd);
nvgpu_semaphore_put(sema);
}
}
#endif
static void add_sema_incr_cmd(struct gk20a *g, struct nvgpu_channel *c,
struct nvgpu_semaphore *s, struct priv_cmd_entry *cmd,
bool wfi)
@@ -95,27 +114,11 @@ static void add_sema_incr_cmd(struct gk20a *g, struct nvgpu_channel *c,
va, cmd);
}
static void channel_sync_semaphore_gen_wait_cmd(struct nvgpu_channel *c,
struct nvgpu_semaphore *sema, struct priv_cmd_entry *wait_cmd,
u32 wait_cmd_size)
{
bool has_incremented;
if (sema == NULL) {
/* came from an expired sync fence */
nvgpu_priv_cmdbuf_append_zeros(c->g, wait_cmd, wait_cmd_size);
} else {
has_incremented = nvgpu_semaphore_can_wait(sema);
nvgpu_assert(has_incremented);
add_sema_wait_cmd(c->g, c, sema, wait_cmd);
nvgpu_semaphore_put(sema);
}
}
static int channel_sync_semaphore_wait_fd(
struct nvgpu_channel_sync *s, int fd,
struct priv_cmd_entry **entry, u32 max_wait_cmds)
{
#ifndef CONFIG_NVGPU_SYNCFD_NONE
struct nvgpu_channel_sync_semaphore *sema =
nvgpu_channel_sync_semaphore_from_base(s);
struct nvgpu_channel *c = sema->c;
@@ -164,6 +167,14 @@ static int channel_sync_semaphore_wait_fd(
cleanup:
os_fence.ops->drop_ref(&os_fence);
return err;
#else
struct nvgpu_channel_sync_semaphore *sema =
nvgpu_channel_sync_semaphore_from_base(s);
nvgpu_err(sema->c->g,
"trying to use sync fds with CONFIG_NVGPU_SYNCFD_NONE");
return -ENODEV;
#endif
}
static int channel_sync_semaphore_incr_common(
@@ -246,7 +257,7 @@ static int channel_sync_semaphore_incr_user(
bool need_sync_fence,
bool register_irq)
{
#ifdef CONFIG_SYNC
#ifndef CONFIG_NVGPU_SYNCFD_NONE
int err;
err = channel_sync_semaphore_incr_common(s, wfi, entry, fence,
@@ -259,8 +270,9 @@ static int channel_sync_semaphore_incr_user(
#else
struct nvgpu_channel_sync_semaphore *sema =
nvgpu_channel_sync_semaphore_from_base(s);
nvgpu_err(sema->c->g,
"trying to use sync fds with CONFIG_SYNC disabled");
"trying to use sync fds with CONFIG_NVGPU_SYNCFD_NONE");
return -ENODEV;
#endif
}

View File

@@ -241,7 +241,7 @@ u32 nvgpu_nvhost_get_syncpt_client_managed(struct nvgpu_nvhost_dev
*nvgpu_syncpt_dev,
const char *syncpt_name);
#ifdef CONFIG_SYNC
#ifdef CONFIG_NVGPU_SYNCFD_ANDROID
u32 nvgpu_nvhost_sync_pt_id(struct sync_pt *pt);
u32 nvgpu_nvhost_sync_pt_thresh(struct sync_pt *pt);
int nvgpu_nvhost_sync_num_pts(struct sync_fence *fence);
@@ -250,7 +250,7 @@ struct sync_fence *nvgpu_nvhost_sync_fdget(int fd);
struct sync_fence *nvgpu_nvhost_sync_create_fence(
struct nvgpu_nvhost_dev *nvgpu_syncpt_dev,
u32 id, u32 thresh, const char *name);
#endif /* CONFIG_SYNC */
#endif /* CONFIG_NVGPU_SYNCFD_ANDROID */
#ifdef CONFIG_TEGRA_T19X_GRHOST

View File

@@ -1,7 +1,7 @@
/*
* nvgpu os fence
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -79,7 +79,7 @@ static inline bool nvgpu_os_fence_is_initialized(struct nvgpu_os_fence *fence)
return (fence->ops != NULL);
}
#ifdef CONFIG_SYNC
#ifndef CONFIG_NVGPU_SYNCFD_NONE
int nvgpu_os_fence_sema_create(
struct nvgpu_os_fence *fence_out,
@@ -106,9 +106,9 @@ static inline int nvgpu_os_fence_fdget(
return -ENOSYS;
}
#endif /* CONFIG_SYNC */
#endif /* !CONFIG_NVGPU_SYNCFD_NONE */
#if defined(CONFIG_TEGRA_GK20A_NVHOST) && defined(CONFIG_SYNC)
#if defined(CONFIG_TEGRA_GK20A_NVHOST) && !defined(CONFIG_NVGPU_SYNCFD_NONE)
int nvgpu_os_fence_syncpt_create(struct nvgpu_os_fence *fence_out,
struct nvgpu_channel *c, struct nvgpu_nvhost_dev *nvhost_dev,
@@ -124,6 +124,6 @@ static inline int nvgpu_os_fence_syncpt_create(
return -ENOSYS;
}
#endif /* CONFIG_TEGRA_GK20A_NVHOST && CONFIG_SYNC */
#endif /* CONFIG_TEGRA_GK20A_NVHOST && !CONFIG_NVGPU_SYNCFD_NONE */
#endif /* NVGPU_OS_FENCE_H */

View File

@@ -1,7 +1,7 @@
/*
* nvgpu os fence semas
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -32,7 +32,7 @@ struct nvgpu_os_fence_sema {
struct nvgpu_os_fence *fence;
};
#ifdef CONFIG_SYNC
#if !defined(CONFIG_NVGPU_SYNCFD_NONE)
/*
* Return a struct of nvgpu_os_fence_sema only if the underlying os_fence
* object is backed by semaphore, else return empty object.
@@ -79,6 +79,6 @@ static inline u32 nvgpu_os_fence_sema_get_num_semaphores(
return 0;
}
#endif
#endif /* !CONFIG_NVGPU_SYNCFD_NONE */
#endif /* NVGPU_OS_FENCE_SEMAS_H */
#endif /* NVGPU_OS_FENCE_SEMAS_H */

View File

@@ -1,7 +1,7 @@
/*
* nvgpu os fence syncpts
*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -31,7 +31,7 @@ struct nvgpu_os_fence_syncpt {
struct nvgpu_os_fence *fence;
};
#if defined(CONFIG_TEGRA_GK20A_NVHOST) && defined(CONFIG_SYNC)
#if defined(CONFIG_TEGRA_GK20A_NVHOST) && !defined(CONFIG_NVGPU_SYNCFD_NONE)
/*
* Return a struct of nvgpu_os_fence_syncpt only if the underlying os_fence
* object is backed by syncpoints, else return empty object.
@@ -80,4 +80,4 @@ static inline u32 nvgpu_os_fence_syncpt_get_num_syncpoints(
#endif
#endif /* NVGPU_OS_FENCE_SYNPT_H */
#endif /* NVGPU_OS_FENCE_SYNPT_H */

View File

@@ -810,7 +810,7 @@ static int gk20a_ioctl_channel_submit_gpfifo(
return -ETIMEDOUT;
}
#ifndef CONFIG_SYNC
#ifdef CONFIG_NVGPU_SYNCFD_NONE
if (flag_sync_fence) {
return -EINVAL;
}

View File

@@ -291,7 +291,7 @@ static int nvgpu_channel_alloc_linux(struct gk20a *g, struct nvgpu_channel *ch)
ch->os_priv = priv;
priv->ch = ch;
#ifdef CONFIG_SYNC
#ifndef CONFIG_NVGPU_SYNCFD_NONE
ch->has_os_fence_framework_support = true;
#endif
@@ -311,7 +311,7 @@ static void nvgpu_channel_free_linux(struct gk20a *g, struct nvgpu_channel *ch)
ch->os_priv = NULL;
#ifdef CONFIG_SYNC
#ifndef CONFIG_NVGPU_SYNCFD_NONE
ch->has_os_fence_framework_support = false;
#endif
}

View File

@@ -317,7 +317,7 @@ void gk20a_init_linux_characteristics(struct gk20a *g)
nvgpu_set_enabled(g, NVGPU_SUPPORT_DETERMINISTIC_OPTS, true);
nvgpu_set_enabled(g, NVGPU_SUPPORT_USERSPACE_MANAGED_AS, true);
if (IS_ENABLED(CONFIG_SYNC)) {
if (!IS_ENABLED(CONFIG_NVGPU_SYNCFD_NONE)) {
nvgpu_set_enabled(g, NVGPU_SUPPORT_SYNC_FENCE_FDS, true);
}
}

View File

@@ -226,7 +226,7 @@ void nvgpu_nvhost_remove_symlink(struct gk20a *g)
}
}
#ifdef CONFIG_SYNC
#ifndef CONFIG_NVGPU_SYNCFD_NONE
u32 nvgpu_nvhost_sync_pt_id(struct sync_pt *pt)
{
return nvhost_sync_pt_id(pt);
@@ -258,7 +258,7 @@ struct sync_fence *nvgpu_nvhost_sync_create_fence(
return nvhost_sync_create_fence(nvhost_dev->host1x_pdev, &pt, 1, name);
}
#endif /* CONFIG_SYNC */
#endif /* !CONFIG_NVGPU_SYNCFD_NONE */
#ifdef CONFIG_TEGRA_T19X_GRHOST
int nvgpu_nvhost_get_syncpt_aperture(

View File

@@ -1,7 +1,7 @@
/*
* Semaphore Sync Framework Integration
*
* Copyright (c) 2017-2018, NVIDIA Corporation. All rights reserved.
* Copyright (c) 2017-2020, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -25,7 +25,7 @@ struct sync_pt;
struct nvgpu_semaphore;
struct fence;
#ifdef CONFIG_SYNC
#ifdef CONFIG_NVGPU_SYNCFD_ANDROID
struct sync_timeline *gk20a_sync_timeline_create(const char *name);
void gk20a_sync_timeline_destroy(struct sync_timeline *);
void gk20a_sync_timeline_signal(struct sync_timeline *);