Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-22 17:36:20 +03:00)
gpu: nvgpu: add cycle stats to debugger node
Add NVGPU_DBG_GPU_IOCTL_CYCLE_STATS to the debugger node, to install/uninstall a buffer for cycle stats.

Add NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT to the debugger node, to attach/flush/detach a buffer for Mode-E streamout.

These ioctls apply to the first channel in the debug session.

Bug 200464613
Jira NVGPU-1442

Change-Id: I0b96d9a07c016690140292fa5886fda545697ee6
Signed-off-by: Thomas Fleury <tfleury@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2002060
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: efdcce03d3
Commit: 90b0bf98ac
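As context for the diff below, here is a minimal userspace sketch of the new NVGPU_DBG_GPU_IOCTL_CYCLE_STATS call on the debugger node. It is not part of the patch: the debugger node path (for example /dev/nvhost-dbg-gpu), the way dbg_fd and the buffer's dmabuf fd are obtained, and the helper names are assumptions for illustration; only the argument struct and ioctl number come from the uapi additions in this change, and a channel must already be bound to the debug session since the ioctl acts on the session's first channel.

/*
 * Hypothetical sketch, not from the patch.  dbg_fd is assumed to come from
 * opening the nvgpu debugger node; buffer_dmabuf_fd is an already-exported
 * dma-buf holding the cycle stats buffer.
 */
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/nvgpu.h>        /* assumed install location of the nvgpu uapi header */

int cycle_stats_install(int dbg_fd, __u32 buffer_dmabuf_fd)
{
        struct nvgpu_dbg_gpu_cycle_stats_args args = {
                .dmabuf_fd = buffer_dmabuf_fd,  /* non-zero fd: map and install buffer */
        };

        return ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_CYCLE_STATS, &args);
}

int cycle_stats_release(int dbg_fd)
{
        struct nvgpu_dbg_gpu_cycle_stats_args args = {
                .dmabuf_fd = 0,                 /* zero fd: free the installed buffer */
        };

        return ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_CYCLE_STATS, &args);
}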
@@ -130,8 +130,7 @@ void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch)
 	nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex);
 }
 
-static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
-			struct nvgpu_cycle_stats_args *args)
+int gk20a_channel_cycle_stats(struct channel_gk20a *ch, int dmabuf_fd)
 {
 	struct dma_buf *dmabuf;
 	void *virtual_address;
@@ -141,10 +140,10 @@ static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
 	if (!nvgpu_is_enabled(ch->g, NVGPU_SUPPORT_CYCLE_STATS))
 		return -ENOSYS;
 
-	if (args->dmabuf_fd && !priv->cyclestate_buffer_handler) {
+	if (dmabuf_fd && !priv->cyclestate_buffer_handler) {
 
 		/* set up new cyclestats buffer */
-		dmabuf = dma_buf_get(args->dmabuf_fd);
+		dmabuf = dma_buf_get(dmabuf_fd);
 		if (IS_ERR(dmabuf))
 			return PTR_ERR(dmabuf);
 		virtual_address = dma_buf_vmap(dmabuf);
@@ -156,12 +155,12 @@ static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
 		ch->cyclestate.cyclestate_buffer_size = dmabuf->size;
 		return 0;
 
-	} else if (!args->dmabuf_fd && priv->cyclestate_buffer_handler) {
+	} else if (!dmabuf_fd && priv->cyclestate_buffer_handler) {
 		gk20a_channel_free_cycle_stats_buffer(ch);
 		return 0;
 
-	} else if (!args->dmabuf_fd && !priv->cyclestate_buffer_handler) {
-		/* no requst from GL */
+	} else if (!dmabuf_fd && !priv->cyclestate_buffer_handler) {
+		/* no request from GL */
 		return 0;
 
 	} else {
@@ -170,7 +169,7 @@ static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
 	}
 }
 
-static int gk20a_flush_cycle_stats_snapshot(struct channel_gk20a *ch)
+int gk20a_flush_cycle_stats_snapshot(struct channel_gk20a *ch)
 {
 	int ret;
 
@@ -184,7 +183,7 @@ static int gk20a_flush_cycle_stats_snapshot(struct channel_gk20a *ch)
 	return ret;
 }
 
-static int gk20a_attach_cycle_stats_snapshot(struct channel_gk20a *ch,
+int gk20a_attach_cycle_stats_snapshot(struct channel_gk20a *ch,
 			u32 dmabuf_fd,
 			u32 perfmon_id_count,
 			u32 *perfmon_id_start)
@@ -1289,7 +1288,7 @@ long gk20a_channel_ioctl(struct file *filp,
 			break;
 		}
 		err = gk20a_channel_cycle_stats(ch,
-			(struct nvgpu_cycle_stats_args *)buf);
+			((struct nvgpu_cycle_stats_args *)buf)->dmabuf_fd);
 		gk20a_idle(ch->g);
 		break;
 #endif
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -36,9 +36,16 @@ long gk20a_channel_ioctl(struct file *filp,
 int gk20a_channel_open_ioctl(struct gk20a *g,
 		struct nvgpu_channel_open_args *args);
 
-int gk20a_channel_free_cycle_stats_snapshot(struct channel_gk20a *ch);
+int gk20a_channel_cycle_stats(struct channel_gk20a *ch, int dmabuf_fd);
 void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch);
 
+int gk20a_attach_cycle_stats_snapshot(struct channel_gk20a *ch,
+		u32 dmabuf_fd,
+		u32 perfmon_id_count,
+		u32 *perfmon_id_start);
+int gk20a_flush_cycle_stats_snapshot(struct channel_gk20a *ch);
+int gk20a_channel_free_cycle_stats_snapshot(struct channel_gk20a *ch);
+
 extern const struct file_operations gk20a_channel_ops;
 
 u32 nvgpu_get_common_runlist_level(u32 level);
@@ -42,6 +42,7 @@
 #include "os_linux.h"
 #include "platform_gk20a.h"
 #include "ioctl_dbg.h"
+#include "ioctl_channel.h"
 #include "dmabuf_vidmem.h"
 
 struct dbg_session_gk20a_linux {
@@ -1855,6 +1856,87 @@ static int nvgpu_dbg_gpu_set_sm_exception_type_mask(struct dbg_session_gk20a *db
 	return err;
 }
 
+#if defined(CONFIG_GK20A_CYCLE_STATS)
+static int nvgpu_dbg_gpu_cycle_stats(struct dbg_session_gk20a *dbg_s,
+				struct nvgpu_dbg_gpu_cycle_stats_args *args)
+{
+	struct channel_gk20a *ch = NULL;
+	int err;
+
+	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
+	if (ch == NULL) {
+		return -EINVAL;
+	}
+
+	err = gk20a_busy(ch->g);
+	if (err != 0) {
+		return err;
+	}
+
+	err = gk20a_channel_cycle_stats(ch, args->dmabuf_fd);
+
+	gk20a_idle(ch->g);
+	return err;
+}
+
+static int nvgpu_dbg_gpu_cycle_stats_snapshot(struct dbg_session_gk20a *dbg_s,
+				struct nvgpu_dbg_gpu_cycle_stats_snapshot_args *args)
+{
+	struct channel_gk20a *ch = NULL;
+	int err;
+
+	if (!args->dmabuf_fd) {
+		return -EINVAL;
+	}
+
+	nvgpu_speculation_barrier();
+
+	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
+	if (ch == NULL) {
+		return -EINVAL;
+	}
+
+	/* is it allowed to handle calls for current GPU? */
+	if (!nvgpu_is_enabled(ch->g, NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT)) {
+		return -ENOSYS;
+	}
+
+	err = gk20a_busy(ch->g);
+	if (err != 0) {
+		return err;
+	}
+
+	/* handle the command (most frequent cases first) */
+	switch (args->cmd) {
+	case NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_FLUSH:
+		err = gk20a_flush_cycle_stats_snapshot(ch);
+		args->extra = 0;
+		break;
+
+	case NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_ATTACH:
+		err = gk20a_attach_cycle_stats_snapshot(ch,
+				args->dmabuf_fd,
+				args->extra,
+				&args->extra);
+		break;
+
+	case NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_DETACH:
+		err = gk20a_channel_free_cycle_stats_snapshot(ch);
+		args->extra = 0;
+		break;
+
+	default:
+		pr_err("cyclestats: unknown command %u\n", args->cmd);
+		err = -EINVAL;
+		break;
+	}
+
+	gk20a_idle(ch->g);
+	return err;
+}
+
+#endif
+
 int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
 {
 	struct nvgpu_os_linux *l = container_of(inode->i_cdev,
@@ -2011,6 +2093,18 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
 			(struct nvgpu_dbg_gpu_set_sm_exception_type_mask_args *)buf);
 		break;
 
+#ifdef CONFIG_GK20A_CYCLE_STATS
+	case NVGPU_DBG_GPU_IOCTL_CYCLE_STATS:
+		err = nvgpu_dbg_gpu_cycle_stats(dbg_s,
+				(struct nvgpu_dbg_gpu_cycle_stats_args *)buf);
+		break;
+
+	case NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT:
+		err = nvgpu_dbg_gpu_cycle_stats_snapshot(dbg_s,
+				(struct nvgpu_dbg_gpu_cycle_stats_snapshot_args *)buf);
+		break;
+#endif
+
 	default:
 		nvgpu_err(g,
 			"unrecognized dbg gpu ioctl cmd: 0x%x",
@@ -1,7 +1,7 @@
 /*
  * NVGPU Public Interface Header
  *
- * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1421,8 +1421,33 @@ struct nvgpu_dbg_gpu_set_sm_exception_type_mask_args {
 	_IOW(NVGPU_DBG_GPU_IOCTL_MAGIC, 23, \
 		struct nvgpu_dbg_gpu_set_sm_exception_type_mask_args)
 
+struct nvgpu_dbg_gpu_cycle_stats_args {
+	__u32 dmabuf_fd;
+	__u32 reserved;
+};
+
+#define NVGPU_DBG_GPU_IOCTL_CYCLE_STATS \
+	_IOWR(NVGPU_DBG_GPU_IOCTL_MAGIC, 24, struct nvgpu_dbg_gpu_cycle_stats_args)
+
+/* cycle stats snapshot buffer support for mode E */
+struct nvgpu_dbg_gpu_cycle_stats_snapshot_args {
+	__u32 cmd;		/* in: command to handle */
+	__u32 dmabuf_fd;	/* in: dma buffer handler */
+	__u32 extra;		/* in/out: extra payload e.g. */
+				/* counter/start perfmon */
+	__u32 reserved;
+};
+
+/* valid commands to control cycle stats shared buffer */
+#define NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_FLUSH	0
+#define NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_ATTACH	1
+#define NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_DETACH	2
+
+#define NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT \
+	_IOWR(NVGPU_DBG_GPU_IOCTL_MAGIC, 25, struct nvgpu_dbg_gpu_cycle_stats_snapshot_args)
+
 #define NVGPU_DBG_GPU_IOCTL_LAST \
-	_IOC_NR(NVGPU_DBG_GPU_IOCTL_SET_SM_EXCEPTION_TYPE_MASK)
+	_IOC_NR(NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT)
 
 #define NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE \
 	sizeof(struct nvgpu_dbg_gpu_access_fb_memory_args)
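To tie the uapi additions above together, here is a hedged usage sketch of the NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT command flow (attach, then flush, then detach) against the debugger node. The helper names, the debugger-node fd, the origin of the snapshot dma-buf, and the perfmon count are assumptions for illustration; the command values, the requirement that dmabuf_fd be non-zero, and the in/out meaning of the extra field follow the definitions and the attach handler shown in the diff.

/*
 * Hypothetical sketch, not from the patch.  dbg_fd is assumed to come from
 * opening the nvgpu debugger node; snapshot_dmabuf_fd is an already-exported
 * dma-buf used for Mode-E streamout.
 */
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/nvgpu.h>

static int snapshot_cmd(int dbg_fd, __u32 cmd, __u32 dmabuf_fd, __u32 *extra)
{
        struct nvgpu_dbg_gpu_cycle_stats_snapshot_args args = {
                .cmd = cmd,
                .dmabuf_fd = dmabuf_fd,  /* must be non-zero, even for flush/detach */
                .extra = extra ? *extra : 0,
        };
        int err = ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT, &args);

        if (err == 0 && extra)
                *extra = args.extra;     /* e.g. first reserved perfmon id after ATTACH */
        return err;
}

int run_mode_e_streamout(int dbg_fd, __u32 snapshot_dmabuf_fd, __u32 perfmon_count)
{
        __u32 extra = perfmon_count;     /* in: number of perfmons to reserve */
        int err;

        err = snapshot_cmd(dbg_fd, NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_ATTACH,
                           snapshot_dmabuf_fd, &extra);
        if (err)
                return err;
        /* extra now holds the starting perfmon id reserved for this buffer */

        /* ... run the workload, then drain pending Mode-E records ... */
        err = snapshot_cmd(dbg_fd, NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_FLUSH,
                           snapshot_dmabuf_fd, NULL);
        if (err)
                return err;

        return snapshot_cmd(dbg_fd, NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_DETACH,
                            snapshot_dmabuf_fd, NULL);
}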