gpu: nvgpu: move nvgpu_has_syncpoints

nvgpu_has_syncpoints is more general than channel synchronization, so
move it from channel_sync.c to nvhost.c. Move the declaration from
gk20a.h to nvhost.h.

As the debugfs knob is Linux-specific, move it from struct gk20a to
struct nvgpu_os_linux.

Jira NVGPU-4548

Change-Id: I4236086744993c3daac042f164de30939c01ee77
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2318814
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author:     Konsta Hölttä <kholtta@nvidia.com>
Date:       2020-03-26 12:07:41 +02:00
Committer:  Alex Waterman
Commit:     d0ffb335dc (parent db9c1b1f97)
13 changed files with 53 additions and 30 deletions


@@ -37,6 +37,7 @@
#include <nvgpu/nvgpu_init.h>
#include <nvgpu/gops_mc.h>
#include <nvgpu/trace.h>
#include <nvgpu/nvhost.h>
#ifdef CONFIG_NVGPU_LS_PMU
#include <nvgpu/pmu/pmu_pstate.h>


@@ -39,6 +39,7 @@
#include <nvgpu/vgpu/vm_vgpu.h>
#include <nvgpu/cbc.h>
#include <nvgpu/static_analysis.h>
#include <nvgpu/nvhost.h>
struct nvgpu_ctag_buffer_info {
u64 size;


@@ -58,16 +58,6 @@ bool nvgpu_channel_sync_needs_os_fence_framework(struct gk20a *g)
return !nvgpu_has_syncpoints(g);
}
bool nvgpu_has_syncpoints(struct gk20a *g)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
return nvgpu_is_enabled(g, NVGPU_HAS_SYNCPOINTS) &&
!g->disable_syncpoints;
#else
return false;
#endif
}
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
int nvgpu_channel_sync_wait_fence_fd(struct nvgpu_channel_sync *s, int fd,
struct priv_cmd_entry *entry, u32 max_wait_cmds)
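For callers nothing changes: channel_sync.c keeps consuming the helper through nvhost.h, as the retained nvgpu_channel_sync_needs_os_fence_framework() above shows. A minimal sketch of that consumption pattern (hypothetical function, not part of this change):

/* Hypothetical caller: choose the sync backend from the moved helper. */
#include <nvgpu/nvhost.h>

static bool hypothetical_use_syncpoint_backend(struct gk20a *g)
{
	/*
	 * True when the platform exposes syncpoints and the debugfs knob
	 * has not forced them off; false means fall back to OS fences,
	 * mirroring nvgpu_channel_sync_needs_os_fence_framework().
	 */
	return nvgpu_has_syncpoints(g);
}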


@@ -234,6 +234,7 @@
#include <nvgpu/pmu/pmu_perfmon.h>
#include <nvgpu/nvgpu_init.h>
#include <nvgpu/sbr.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/hw/tu104/hw_pwr_tu104.h>


@@ -800,9 +800,6 @@ struct gk20a {
unsigned int aggressive_sync_destroy_thresh;
bool aggressive_sync_destroy;
/** Debugfs knob for forcing syncpt support off in runtime. */
bool disable_syncpoints;
/** Is LS PMU supported? */
bool support_ls_pmu;
@@ -1086,8 +1083,6 @@ int gk20a_do_unidle_impl(struct gk20a *g);
/** tu104 HW version */
#define NVGPU_GPUID_TU104 0x00000164U
bool nvgpu_has_syncpoints(struct gk20a *g);
/**
* @}
*/


@@ -23,12 +23,13 @@
#ifndef NVGPU_NVHOST_H
#define NVGPU_NVHOST_H
#ifdef CONFIG_TEGRA_GK20A_NVHOST
#include <nvgpu/types.h>
struct nvgpu_nvhost_dev;
struct gk20a;
#ifdef CONFIG_TEGRA_GK20A_NVHOST
struct nvgpu_nvhost_dev;
struct sync_pt;
struct sync_fence;
struct timespec;
@@ -71,6 +72,15 @@ int nvgpu_get_nvhost_dev(struct gk20a *g);
*/
void nvgpu_free_nvhost_dev(struct gk20a *g);
/**
* @brief Check if the gpu has access to syncpoints.
*
* @param g [in] The GPU super structure.
*
* @return whether syncpt access is available
*/
bool nvgpu_has_syncpoints(struct gk20a *g);
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
/**
* Available waiter index is used for sync point wait path.
@@ -289,6 +299,19 @@ static inline int nvgpu_nvhost_syncpt_init(struct gk20a *g)
{
return 0;
}
#endif
#endif /* CONFIG_TEGRA_T19X_GRHOST */
#else /* CONFIG_TEGRA_GK20A_NVHOST */
/**
* @brief Check if the gpu has access to syncpoints.
*
* @param g [in] The GPU super structure.
*
* @return whether syncpt access is available
*/
static inline bool nvgpu_has_syncpoints(struct gk20a *g)
{
return false;
}
#endif /* CONFIG_TEGRA_GK20A_NVHOST */
#endif /* NVGPU_NVHOST_H */
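Collapsed, the header now resolves to roughly the following shape (a simplified sketch of the #ifdef structure above, not the verbatim header):

/* Simplified sketch of the resulting nvhost.h layout. */
struct gk20a;

#ifdef CONFIG_TEGRA_GK20A_NVHOST
/* Real check; the definition lives in the per-OS nvhost.c. */
bool nvgpu_has_syncpoints(struct gk20a *g);
#else
/* No nvhost support compiled in, so syncpoints are never available. */
static inline bool nvgpu_has_syncpoints(struct gk20a *g)
{
	return false;
}
#endif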


@@ -37,9 +37,11 @@ struct nvgpu_nvhost_dev {
u32 syncpt_value;
};
int nvgpu_get_nvhost_dev(struct gk20a *g);
void nvgpu_free_nvhost_dev(struct gk20a *g);
int nvgpu_get_nvhost_dev(struct gk20a *g);
bool nvgpu_has_syncpoints(struct gk20a *g);
int nvgpu_nvhost_get_syncpt_aperture(
struct nvgpu_nvhost_dev *nvgpu_syncpt_dev,


@@ -363,7 +363,7 @@ void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink)
l->debugfs, &g->ch_wdt_init_limit_ms);
debugfs_create_bool("disable_syncpoints", S_IRUGO,
l->debugfs, &g->disable_syncpoints);
l->debugfs, &l->disable_syncpoints);
/* New debug logging API. */
debugfs_create_u64("log_mask", S_IRUGO|S_IWUSR,
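debugfs_create_bool() binds the file directly to the bool it is handed, so relocating the storage to struct nvgpu_os_linux is the whole change; reads and writes of the debugfs file hit l->disable_syncpoints with no extra callbacks. A self-contained sketch of that pattern (hypothetical demo module, not nvgpu code):

#include <linux/debugfs.h>
#include <linux/module.h>

static bool disable_syncpoints;	/* toggled from userspace at runtime */
static struct dentry *droot;

static int __init knob_demo_init(void)
{
	droot = debugfs_create_dir("knob_demo", NULL);
	/* The file reads and writes the bool in place; no handlers needed. */
	debugfs_create_bool("disable_syncpoints", 0644, droot,
			    &disable_syncpoints);
	return 0;
}

static void __exit knob_demo_exit(void)
{
	debugfs_remove_recursive(droot);
}

module_init(knob_demo_init);
module_exit(knob_demo_exit);
MODULE_LICENSE("GPL");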


@@ -26,6 +26,7 @@
#include <nvgpu/gk20a.h>
#include <nvgpu/nvgpu_init.h>
#include <nvgpu/channel.h>
#include <nvgpu/nvhost.h>
#include <nvgpu/linux/vm.h>


@@ -67,6 +67,14 @@ void nvgpu_free_nvhost_dev(struct gk20a *g)
nvgpu_kfree(g, g->nvhost);
}
bool nvgpu_has_syncpoints(struct gk20a *g)
{
struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
return nvgpu_is_enabled(g, NVGPU_HAS_SYNCPOINTS) &&
!l->disable_syncpoints;
}
int nvgpu_nvhost_module_busy_ext(
struct nvgpu_nvhost_dev *nvhost_dev)
{


@@ -175,6 +175,9 @@ struct nvgpu_os_linux {
struct rw_semaphore busy_lock;
bool init_done;
/** Debugfs knob for forcing syncpt support off in runtime. */
bool disable_syncpoints;
};
static inline struct nvgpu_os_linux *nvgpu_os_linux_from_gk20a(struct gk20a *g)
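nvgpu_os_linux_from_gk20a() is what lets the Linux backend reach the relocated knob from a plain struct gk20a pointer. It is presumably the usual container_of pattern, sketched here under the assumption that struct nvgpu_os_linux embeds struct gk20a as a member named g:

/* Sketch only: recover the wrapping OS struct from the embedded gk20a. */
#include <linux/kernel.h>

static inline struct nvgpu_os_linux *os_linux_from_gk20a_sketch(struct gk20a *g)
{
	/* Assumes struct nvgpu_os_linux embeds struct gk20a as member "g",
	 * as hinted by the accessor declared above. */
	return container_of(g, struct nvgpu_os_linux, g);
}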


@@ -32,6 +32,11 @@ void nvgpu_free_nvhost_dev(struct gk20a *g) {
}
}
bool nvgpu_has_syncpoints(struct gk20a *g)
{
return nvgpu_is_enabled(g, NVGPU_HAS_SYNCPOINTS);
}
static void allocate_new_syncpt(struct nvgpu_nvhost_dev *nvgpu_syncpt_dev)
{
u32 syncpt_id, syncpt_val;


@@ -424,7 +424,6 @@ done:
return ret;
}
#define F_SYNC_GLOBAL_DISABLE_SYNCPT 0
#define F_SYNC_SYNCPT_ALLOC_FAILED 1
#define F_SYNC_USER_MANAGED 2
#define F_SYNC_STRADD_FAIL 3
@@ -451,10 +450,6 @@ static void clear_test_params(struct gk20a *g,
bool *fault_injection_enabled, u32 branch,
struct nvgpu_posix_fault_inj *kmem_fi)
{
if (g->disable_syncpoints) {
g->disable_syncpoints = false;
}
if (ch->vm->guest_managed) {
ch->vm->guest_managed = false;
}
@@ -491,9 +486,7 @@ int test_sync_create_fail(struct unit_module *m, struct gk20a *g, void *args)
*/
g->nvhost->syncpt_id = 0U;
if (branches == F_SYNC_GLOBAL_DISABLE_SYNCPT) {
g->disable_syncpoints = true;
} else if (branches == F_SYNC_SYNCPT_ALLOC_FAILED) {
if (branches == F_SYNC_SYNCPT_ALLOC_FAILED) {
/* fail first kzalloc call */
nvgpu_posix_enable_fault_injection(kmem_fi, true, 0);
fault_injection_enabled = true;