gpu: nvgpu: guard sync cmd HALs properly

Make the syncpt and sema wait and incr command HAL ops consistent. Add
CONFIG_NVGPU_SW_SEMAPHORE guards for the semaphore ops. The syncpoint
ops already have CONFIG_TEGRA_GK20A_NVHOST around them.

Delete the dummy syncpt ops. They are not used; the ops are only needed
when the real versions exist.

Jira NVGPU-4548

Change-Id: I30315a67169b31b1d63a0a1a0a4492688db4a2bc
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2325100
(cherry picked from commit ed13b286c5fbdbc008ec59172d98ac79e9f2e733)
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2331337
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Konsta Hölttä
2020-04-03 16:15:28 +03:00
committed by Alex Waterman
parent 39844fb27c
commit 4acf78dff3
11 changed files with 39 additions and 73 deletions

View File

@@ -722,22 +722,23 @@ static const struct gpu_ops gm20b_ops = {
.sync = {
#ifdef CONFIG_TEGRA_GK20A_NVHOST
.syncpt = {
.get_sync_ro_map = NULL,
.alloc_buf = gk20a_syncpt_alloc_buf,
.free_buf = gk20a_syncpt_free_buf,
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
.add_wait_cmd = gk20a_syncpt_add_wait_cmd,
.get_incr_per_release =
gk20a_syncpt_get_incr_per_release,
.get_wait_cmd_size =
gk20a_syncpt_get_wait_cmd_size,
.add_incr_cmd = gk20a_syncpt_add_incr_cmd,
.get_incr_cmd_size =
gk20a_syncpt_get_incr_cmd_size,
.get_incr_per_release =
gk20a_syncpt_get_incr_per_release,
#endif
.get_sync_ro_map = NULL,
},
#endif /* CONFIG_TEGRA_GK20A_NVHOST */
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
#if defined(CONFIG_NVGPU_KERNEL_MODE_SUBMIT) && \
defined(CONFIG_NVGPU_SW_SEMAPHORE)
.sema = {
.add_wait_cmd = gk20a_sema_add_wait_cmd,
.get_wait_cmd_size = gk20a_sema_get_wait_cmd_size,

View File

@@ -820,22 +820,23 @@ static const struct gpu_ops gp10b_ops = {
.sync = {
#ifdef CONFIG_TEGRA_GK20A_NVHOST
.syncpt = {
.get_sync_ro_map = NULL,
.alloc_buf = gk20a_syncpt_alloc_buf,
.free_buf = gk20a_syncpt_free_buf,
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
.add_wait_cmd = gk20a_syncpt_add_wait_cmd,
.get_incr_per_release =
gk20a_syncpt_get_incr_per_release,
.get_wait_cmd_size =
gk20a_syncpt_get_wait_cmd_size,
.add_incr_cmd = gk20a_syncpt_add_incr_cmd,
.get_incr_cmd_size =
gk20a_syncpt_get_incr_cmd_size,
.get_incr_per_release =
gk20a_syncpt_get_incr_per_release,
#endif
.get_sync_ro_map = NULL,
},
#endif /* CONFIG_TEGRA_GK20A_NVHOST */
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
#if defined(CONFIG_NVGPU_KERNEL_MODE_SUBMIT) && \
defined(CONFIG_NVGPU_SW_SEMAPHORE)
.sema = {
.add_wait_cmd = gk20a_sema_add_wait_cmd,
.get_wait_cmd_size = gk20a_sema_get_wait_cmd_size,

View File

@@ -1024,6 +1024,7 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
.sync = {
#ifdef CONFIG_TEGRA_GK20A_NVHOST
.syncpt = {
.get_sync_ro_map = gv11b_syncpt_get_sync_ro_map,
.alloc_buf = gv11b_syncpt_alloc_buf,
.free_buf = gv11b_syncpt_free_buf,
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
@@ -1036,10 +1037,10 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 8_7))
.get_incr_per_release =
gv11b_syncpt_get_incr_per_release,
#endif
.get_sync_ro_map = gv11b_syncpt_get_sync_ro_map,
},
#endif /* CONFIG_TEGRA_GK20A_NVHOST */
#ifdef CONFIG_NVGPU_SW_SEMAPHORE
#if defined(CONFIG_NVGPU_KERNEL_MODE_SUBMIT) && \
defined(CONFIG_NVGPU_SW_SEMAPHORE)
.sema = {
.add_wait_cmd = gv11b_sema_add_wait_cmd,
.get_wait_cmd_size = gv11b_sema_get_wait_cmd_size,

View File

@@ -1049,6 +1049,7 @@ static const struct gpu_ops tu104_ops = {
.sync = {
#ifdef CONFIG_TEGRA_GK20A_NVHOST
.syncpt = {
.get_sync_ro_map = gv11b_syncpt_get_sync_ro_map,
.alloc_buf = gv11b_syncpt_alloc_buf,
.free_buf = gv11b_syncpt_free_buf,
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
@@ -1061,7 +1062,6 @@ static const struct gpu_ops tu104_ops = {
.get_incr_per_release =
gv11b_syncpt_get_incr_per_release,
#endif
.get_sync_ro_map = gv11b_syncpt_get_sync_ro_map,
},
#endif /* CONFIG_TEGRA_GK20A_NVHOST */
#if defined(CONFIG_NVGPU_KERNEL_MODE_SUBMIT) && \

View File

@@ -22,6 +22,9 @@
#ifndef NVGPU_SYNC_SEMA_CMDBUF_GK20A_H
#define NVGPU_SYNC_SEMA_CMDBUF_GK20A_H
#if defined(CONFIG_NVGPU_KERNEL_MODE_SUBMIT) && \
defined(CONFIG_NVGPU_SW_SEMAPHORE)
#include <nvgpu/types.h>
struct gk20a;
@@ -38,4 +41,6 @@ void gk20a_sema_add_incr_cmd(struct gk20a *g,
struct nvgpu_semaphore *s, u64 sema_va,
bool wfi);
#endif /* CONFIG_NVGPU_KERNEL_MODE_SUBMIT && CONFIG_NVGPU_SW_SEMAPHORE */
#endif /* NVGPU_SYNC_SEMA_CMDBUF_GK20A_H */

View File

@@ -22,6 +22,9 @@
#ifndef NVGPU_SYNC_SEMA_CMDBUF_GV11B_H
#define NVGPU_SYNC_SEMA_CMDBUF_GV11B_H
#if defined(CONFIG_NVGPU_KERNEL_MODE_SUBMIT) && \
defined(CONFIG_NVGPU_SW_SEMAPHORE)
#include <nvgpu/types.h>
struct gk20a;
@@ -38,4 +41,6 @@ void gv11b_sema_add_incr_cmd(struct gk20a *g,
struct nvgpu_semaphore *s, u64 sema_va,
bool wfi);
#endif /* CONFIG_NVGPU_KERNEL_MODE_SUBMIT && CONFIG_NVGPU_SW_SEMAPHORE */
#endif /* NVGPU_SYNC_SEMA_CMDBUF_GV11B_H */

View File

@@ -50,30 +50,6 @@ int gk20a_syncpt_alloc_buf(struct nvgpu_channel *c,
#else
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
static inline void gk20a_syncpt_add_wait_cmd(struct gk20a *g,
struct priv_cmd_entry *cmd,
u32 id, u32 thresh, u64 gpu_va_base)
{
}
static inline u32 gk20a_syncpt_get_wait_cmd_size(void)
{
return 0U;
}
static inline u32 gk20a_syncpt_get_incr_per_release(void)
{
return 0U;
}
static inline void gk20a_syncpt_add_incr_cmd(struct gk20a *g,
struct priv_cmd_entry *cmd,
u32 id, u64 gpu_va, bool wfi)
{
}
static inline u32 gk20a_syncpt_get_incr_cmd_size(bool wfi_cmd)
{
return 0U;
}
#endif
static inline void gk20a_syncpt_free_buf(struct nvgpu_channel *c,
struct nvgpu_mem *syncpt_buf)
{
@@ -85,6 +61,6 @@ static inline int gk20a_syncpt_alloc_buf(struct nvgpu_channel *c,
return -ENOSYS;
}
#endif
#endif /* CONFIG_TEGRA_GK20A_NVHOST */
#endif /* NVGPU_SYNC_SYNCPT_CMDBUF_GK20A_H */

View File

@@ -56,31 +56,6 @@ int gv11b_syncpt_get_sync_ro_map(struct vm_gk20a *vm,
#else
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
static inline void gv11b_syncpt_add_wait_cmd(struct gk20a *g,
struct priv_cmd_entry *cmd,
u32 id, u32 thresh, u64 gpu_va_base)
{
}
static inline u32 gv11b_syncpt_get_wait_cmd_size(void)
{
return 0U;
}
static inline u32 gv11b_syncpt_get_incr_per_release(void)
{
return 0U;
}
static inline void gv11b_syncpt_add_incr_cmd(struct gk20a *g,
struct priv_cmd_entry *cmd,
u32 id, u64 gpu_va, bool wfi)
{
}
static inline u32 gv11b_syncpt_get_incr_cmd_size(bool wfi_cmd)
{
return 0U;
}
#endif
static inline void gv11b_syncpt_free_buf(struct nvgpu_channel *c,
struct nvgpu_mem *syncpt_buf)
{
@@ -98,6 +73,6 @@ static inline int gv11b_syncpt_get_sync_ro_map(struct vm_gk20a *vm,
return -EINVAL;
}
#endif
#endif /* CONFIG_TEGRA_GK20A_NVHOST */
#endif /* NVGPU_SYNC_SYNCPT_CMDBUF_GV11B_H */

View File

@@ -524,22 +524,23 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.sync = {
#ifdef CONFIG_TEGRA_GK20A_NVHOST
.syncpt = {
.get_sync_ro_map = NULL,
.alloc_buf = gk20a_syncpt_alloc_buf,
.free_buf = gk20a_syncpt_free_buf,
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
.add_wait_cmd = gk20a_syncpt_add_wait_cmd,
.get_wait_cmd_size =
gk20a_syncpt_get_wait_cmd_size,
.get_incr_per_release =
gk20a_syncpt_get_incr_per_release,
.add_incr_cmd = gk20a_syncpt_add_incr_cmd,
.get_incr_cmd_size =
gk20a_syncpt_get_incr_cmd_size,
.get_incr_per_release =
gk20a_syncpt_get_incr_per_release,
#endif
.get_sync_ro_map = NULL,
},
#endif /* CONFIG_TEGRA_GK20A_NVHOST */
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
#if defined(CONFIG_NVGPU_KERNEL_MODE_SUBMIT) && \
defined(CONFIG_NVGPU_SW_SEMAPHORE)
.sema = {
.add_wait_cmd = gk20a_sema_add_wait_cmd,
.get_wait_cmd_size = gk20a_sema_get_wait_cmd_size,

View File

@@ -644,22 +644,23 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.sync = {
#ifdef CONFIG_TEGRA_GK20A_NVHOST
.syncpt = {
.get_sync_ro_map = vgpu_gv11b_syncpt_get_sync_ro_map,
.alloc_buf = vgpu_gv11b_syncpt_alloc_buf,
.free_buf = vgpu_gv11b_syncpt_free_buf,
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
.add_wait_cmd = gv11b_syncpt_add_wait_cmd,
.get_wait_cmd_size =
gv11b_syncpt_get_wait_cmd_size,
.get_incr_per_release =
gv11b_syncpt_get_incr_per_release,
.add_incr_cmd = gv11b_syncpt_add_incr_cmd,
.get_incr_cmd_size =
gv11b_syncpt_get_incr_cmd_size,
.get_incr_per_release =
gv11b_syncpt_get_incr_per_release,
#endif
.get_sync_ro_map = vgpu_gv11b_syncpt_get_sync_ro_map,
},
#endif
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
#endif /* CONFIG_TEGRA_GK20A_NVHOST */
#if defined(CONFIG_NVGPU_KERNEL_MODE_SUBMIT) && \
defined(CONFIG_NVGPU_SW_SEMAPHORE)
.sema = {
.add_wait_cmd = gv11b_sema_add_wait_cmd,
.get_wait_cmd_size = gv11b_sema_get_wait_cmd_size,

View File

@@ -43,7 +43,6 @@ struct nvgpu_semaphore;
* @see gpu_ops
*/
struct gops_sync {
#ifdef CONFIG_TEGRA_GK20A_NVHOST
struct gops_sync_syncpt {
/**
@@ -91,7 +90,8 @@ struct gops_sync {
#endif /* CONFIG_TEGRA_GK20A_NVHOST */
#ifdef CONFIG_NVGPU_KERNEL_MODE_SUBMIT
#if defined(CONFIG_NVGPU_KERNEL_MODE_SUBMIT) && \
defined(CONFIG_NVGPU_SW_SEMAPHORE)
/** @cond DOXYGEN_SHOULD_SKIP_THIS */
struct gops_sync_sema {
u32 (*get_wait_cmd_size)(void);