gpu: nvgpu: move cycle state buffer handler to linux

We use the dma_buf pointer cyclestate_buffer_handler in common code.
But since dma_buf is Linux-specific, we need to move this out of common code
and into Linux-specific code.

Move dma_buf pointer cyclestate_buffer_handler from common channel code to
struct nvgpu_channel_linux

Fix all pointer accesses to this handle

Move gk20a_channel_free_cycle_stats_buffer() to ioctl_channel.c since it is
mostly Linux-specific.
Since gk20a_channel_free_cycle_stats_buffer() needs to be called while
closing the channel, call it from nvgpu_channel_close_linux().

Jira NVGPU-397
Jira NVGPU-415

Change-Id: Ifb429e49b8f7a1c9e2bc757f3efdd50b28ceca1f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1603909
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Deepak Nibade
2017-11-23 03:59:14 -08:00
committed by mobile promotions
parent 861b11a968
commit ce06f74d6b
6 changed files with 30 additions and 33 deletions

View File

@@ -245,6 +245,7 @@ static void nvgpu_channel_close_linux(struct channel_gk20a *ch)
nvgpu_channel_work_completion_clear(ch);
#if defined(CONFIG_GK20A_CYCLE_STATS)
gk20a_channel_free_cycle_stats_buffer(ch);
gk20a_channel_free_cycle_stats_snapshot(ch);
#endif
}

View File

@@ -17,6 +17,7 @@
#define __NVGPU_CHANNEL_H__
#include <linux/workqueue.h>
#include <linux/dma-buf.h>
#include <nvgpu/types.h>
@@ -56,6 +57,8 @@ struct nvgpu_channel_linux {
struct nvgpu_channel_completion_cb completion_cb;
struct nvgpu_error_notifier error_notifier;
struct dma_buf *cyclestate_buffer_handler;
};
int nvgpu_init_channel_support_linux(struct nvgpu_os_linux *l);

View File

@@ -102,17 +102,35 @@ struct channel_priv {
#if defined(CONFIG_GK20A_CYCLE_STATS)
/*
 * Tear down the channel's cycle-stats buffer, if one is attached.
 *
 * The dma_buf handle lives in the Linux-specific per-channel struct
 * (nvgpu_channel_linux), while the mapped address and size still live in
 * the common ch->cyclestate struct. Safe to call when no buffer is
 * attached: the handler check makes this a no-op in that case.
 */
void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch)
{
struct nvgpu_channel_linux *priv = ch->os_priv;
/* disable existing cyclestats buffer */
nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex);
if (priv->cyclestate_buffer_handler) {
/* unmap the kernel vmap before dropping the dma_buf reference */
dma_buf_vunmap(priv->cyclestate_buffer_handler,
ch->cyclestate.cyclestate_buffer);
dma_buf_put(priv->cyclestate_buffer_handler);
/* clear all bookkeeping so a new buffer can be attached later */
priv->cyclestate_buffer_handler = NULL;
ch->cyclestate.cyclestate_buffer = NULL;
ch->cyclestate.cyclestate_buffer_size = 0;
}
nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex);
}
static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
struct nvgpu_cycle_stats_args *args)
{
struct dma_buf *dmabuf;
void *virtual_address;
struct nvgpu_channel_linux *priv = ch->os_priv;
/* is it allowed to handle calls for current GPU? */
if (!nvgpu_is_enabled(ch->g, NVGPU_SUPPORT_CYCLE_STATS))
return -ENOSYS;
if (args->dmabuf_fd && !ch->cyclestate.cyclestate_buffer_handler) {
if (args->dmabuf_fd && !priv->cyclestate_buffer_handler) {
/* set up new cyclestats buffer */
dmabuf = dma_buf_get(args->dmabuf_fd);
@@ -122,18 +140,16 @@ static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
if (!virtual_address)
return -ENOMEM;
ch->cyclestate.cyclestate_buffer_handler = dmabuf;
priv->cyclestate_buffer_handler = dmabuf;
ch->cyclestate.cyclestate_buffer = virtual_address;
ch->cyclestate.cyclestate_buffer_size = dmabuf->size;
return 0;
} else if (!args->dmabuf_fd &&
ch->cyclestate.cyclestate_buffer_handler) {
} else if (!args->dmabuf_fd && priv->cyclestate_buffer_handler) {
gk20a_channel_free_cycle_stats_buffer(ch);
return 0;
} else if (!args->dmabuf_fd &&
!ch->cyclestate.cyclestate_buffer_handler) {
} else if (!args->dmabuf_fd && !priv->cyclestate_buffer_handler) {
/* no request from GL */
return 0;

View File

@@ -37,6 +37,7 @@ int gk20a_channel_open_ioctl(struct gk20a *g,
struct nvgpu_channel_open_args *args);
int gk20a_channel_free_cycle_stats_snapshot(struct channel_gk20a *ch);
void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch);
extern const struct file_operations gk20a_event_id_ops;
extern const struct file_operations gk20a_channel_ops;

View File

@@ -360,24 +360,6 @@ static void gk20a_wait_until_counter_is_N(
}
}
#if defined(CONFIG_GK20A_CYCLE_STATS)
/*
 * Pre-move version (removed by this change): frees the channel's
 * cycle-stats buffer while the dma_buf handle still lived in the common
 * ch->cyclestate struct. Replaced by the ioctl_channel.c version that
 * reads the handle from nvgpu_channel_linux instead.
 */
void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch)
{
/* disable existing cyclestats buffer */
nvgpu_mutex_acquire(&ch->cyclestate.cyclestate_buffer_mutex);
if (ch->cyclestate.cyclestate_buffer_handler) {
/* unmap the kernel vmap before dropping the dma_buf reference */
dma_buf_vunmap(ch->cyclestate.cyclestate_buffer_handler,
ch->cyclestate.cyclestate_buffer);
dma_buf_put(ch->cyclestate.cyclestate_buffer_handler);
/* clear all bookkeeping so a new buffer can be attached later */
ch->cyclestate.cyclestate_buffer_handler = NULL;
ch->cyclestate.cyclestate_buffer = NULL;
ch->cyclestate.cyclestate_buffer_size = 0;
}
nvgpu_mutex_release(&ch->cyclestate.cyclestate_buffer_mutex);
}
#endif
/* call ONLY when no references to the channel exist: after the last put */
static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
{
@@ -490,10 +472,6 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
nvgpu_big_free(g, ch->gpfifo.pipe);
memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
#if defined(CONFIG_GK20A_CYCLE_STATS)
gk20a_channel_free_cycle_stats_buffer(ch);
#endif
channel_gk20a_free_priv_cmdbuf(ch);
/* sync must be destroyed before releasing channel vm */

View File

@@ -253,10 +253,9 @@ struct channel_gk20a {
#if defined(CONFIG_GK20A_CYCLE_STATS)
struct {
void *cyclestate_buffer;
u32 cyclestate_buffer_size;
struct dma_buf *cyclestate_buffer_handler;
struct nvgpu_mutex cyclestate_buffer_mutex;
void *cyclestate_buffer;
u32 cyclestate_buffer_size;
struct nvgpu_mutex cyclestate_buffer_mutex;
} cyclestate;
struct nvgpu_mutex cs_client_mutex;
@@ -370,7 +369,6 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
unsigned int num_entries,
unsigned int num_inflight_jobs,
u32 flags);
void gk20a_channel_free_cycle_stats_buffer(struct channel_gk20a *ch);
void gk20a_channel_timeout_restart_all_channels(struct gk20a *g);