gpu: nvgpu: Add timeouts_disabled_refcount for enabling timeouts

- timeouts are enabled only when timeouts_disabled_refcount
  drops back to 0 (see the sketch below)
- the timeouts_enabled debugfs node changes from a bool attribute
  to a file with its own fops, to avoid races when timeouts are
  enabled/disabled from debugfs and the ioctl concurrently
- unify setting timeouts_enabled from debugfs and ioctl behind
  the same refcount
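
For reference, a minimal sketch of the gating rule the refcount
implements (a standalone C model, not driver code; plain atomic_t
stands in for nvgpu_atomic_t):

    /*
     * Each independent requester that wants timeouts off (a dbg
     * session via the ioctl, or the debugfs writer) takes one
     * reference; timeouts come back only once every requester
     * has dropped its reference.
     */
    static atomic_t timeouts_disabled_refcount = ATOMIC_INIT(0);

    static bool timeouts_enabled(void)
    {
            return atomic_read(&timeouts_disabled_refcount) == 0;
    }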

Bug 1982434

Change-Id: I54bab778f1ae533872146dfb8d80deafd2a685c7
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1588690
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 982fcfa737 (parent ac687c95d3)
Author:    Seema Khowala
Date:      2017-10-30 14:15:51 -07:00
Committer: mobile promotions

7 changed files with 91 additions and 29 deletions


@@ -279,6 +279,59 @@ static int gk20a_railgating_debugfs_init(struct gk20a *g)
 	return 0;
 }
 
+static ssize_t timeouts_enabled_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[3];
+	struct gk20a *g = file->private_data;
+
+	if (nvgpu_is_timeouts_enabled(g))
+		buf[0] = 'Y';
+	else
+		buf[0] = 'N';
+	buf[1] = '\n';
+	buf[2] = 0x00;
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t timeouts_enabled_write(struct file *file,
+			const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char buf[3];
+	int buf_size;
+	bool timeouts_enabled;
+	struct gk20a *g = file->private_data;
+
+	buf_size = min(count, (sizeof(buf)-1));
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	if (strtobool(buf, &timeouts_enabled) == 0) {
+		nvgpu_mutex_acquire(&g->dbg_sessions_lock);
+		if (timeouts_enabled == false) {
+			/* requesting to disable timeouts */
+			if (g->timeouts_disabled_by_user == false) {
+				nvgpu_atomic_inc(&g->timeouts_disabled_refcount);
+				g->timeouts_disabled_by_user = true;
+			}
+		} else {
+			/* requesting to enable timeouts */
+			if (g->timeouts_disabled_by_user == true) {
+				nvgpu_atomic_dec(&g->timeouts_disabled_refcount);
+				g->timeouts_disabled_by_user = false;
+			}
+		}
+		nvgpu_mutex_release(&g->dbg_sessions_lock);
+	}
+
+	return count;
+}
+
+static const struct file_operations timeouts_enabled_fops = {
+	.open = simple_open,
+	.read = timeouts_enabled_read,
+	.write = timeouts_enabled_write,
+};
+
 void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink)
 {
@@ -323,10 +376,11 @@ void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink)
 					S_IRUGO|S_IWUSR, l->debugfs,
 					&g->gr_idle_timeout_default);
 	l->debugfs_timeouts_enabled =
-			debugfs_create_bool("timeouts_enabled",
+			debugfs_create_file("timeouts_enabled",
 					S_IRUGO|S_IWUSR,
 					l->debugfs,
-					&g->timeouts_enabled);
+					g,
+					&timeouts_enabled_fops);
 	l->debugfs_disable_bigpage =
 		debugfs_create_file("disable_bigpage",


@@ -96,12 +96,15 @@ static void nvgpu_init_timeout(struct gk20a *g)
 {
 	struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g));
 
-	g->gr_idle_timeout_default = NVGPU_DEFAULT_GR_IDLE_TIMEOUT;
-	if (nvgpu_platform_is_silicon(g))
-		g->timeouts_enabled = true;
-	else if (nvgpu_platform_is_fpga(g)) {
+	g->timeouts_disabled_by_user = false;
+	nvgpu_atomic_set(&g->timeouts_disabled_refcount, 0);
+
+	if (nvgpu_platform_is_silicon(g)) {
+		g->gr_idle_timeout_default = NVGPU_DEFAULT_GR_IDLE_TIMEOUT;
+	} else if (nvgpu_platform_is_fpga(g)) {
 		g->gr_idle_timeout_default = GK20A_TIMEOUT_FPGA;
-		g->timeouts_enabled = true;
+	} else {
+		g->gr_idle_timeout_default = (u32)ULONG_MAX;
 	}
 	g->ch_wdt_timeout_ms = platform->ch_wdt_timeout_ms;
 }


@@ -228,7 +228,7 @@ static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
 	int err;
 	struct gk20a *g = dbg_s->g;
 
-	nvgpu_log_fn(g, "powergate mode = %d", args->enable);
+	nvgpu_log(g, gpu_dbg_fn, "timeout enable/disable = %d", args->enable);
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
@@ -385,18 +385,14 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
 	switch (timeout_mode) {
 	case NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE:
-		if (dbg_s->is_timeout_disabled &&
-		    --g->dbg_timeout_disabled_refcount == 0) {
-			g->timeouts_enabled = true;
-		}
+		if (dbg_s->is_timeout_disabled == true)
+			nvgpu_atomic_dec(&g->timeouts_disabled_refcount);
 		dbg_s->is_timeout_disabled = false;
 		break;
 
 	case NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE:
-		if ((dbg_s->is_timeout_disabled == false) &&
-		    (g->dbg_timeout_disabled_refcount++ == 0)) {
-			g->timeouts_enabled = false;
-		}
+		if (dbg_s->is_timeout_disabled == false)
+			nvgpu_atomic_inc(&g->timeouts_disabled_refcount);
 		dbg_s->is_timeout_disabled = true;
 		break;
@@ -408,9 +404,11 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
 		break;
 	}
 
-	nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
-		g->timeouts_enabled ? "Yes" : "No");
+	if (!err)
+		nvgpu_log(g, gpu_dbg_gpu_dbg, "dbg is timeout disabled %s, "
+				"timeouts disabled refcount %d",
+				dbg_s->is_timeout_disabled ? "true" : "false",
+				nvgpu_atomic_read(&g->timeouts_disabled_refcount));
 
 	return err;
 }
@@ -1598,11 +1596,11 @@ static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
 static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
 			 struct nvgpu_dbg_gpu_timeout_args *args)
 {
-	int status;
+	bool status;
 	struct gk20a *g = dbg_s->g;
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-	status = g->timeouts_enabled;
+	status = nvgpu_is_timeouts_enabled(g);
 	nvgpu_mutex_release(&g->dbg_sessions_lock);
 
 	if (status)
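
The per-session flag above gives a simple invariant: with the debugfs
user plus N attached dbg sessions, each holding at most one reference,
the refcount stays in [0, N + 1], and timeouts run only at exactly 0.
A hypothetical sanity check expressing that (not part of the patch):

    /* Hypothetical debug assertion, illustrative only. */
    static void assert_timeout_refcount_sane(struct gk20a *g,
                                             int num_dbg_sessions)
    {
            int rc = nvgpu_atomic_read(&g->timeouts_disabled_refcount);

            WARN_ON(rc < 0 || rc > num_dbg_sessions + 1);
    }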


@@ -424,7 +424,8 @@ int vgpu_probe(struct platform_device *pdev)
 	dma_set_max_seg_size(dev, UINT_MAX);
 
 	gk20a->gr_idle_timeout_default = NVGPU_DEFAULT_GR_IDLE_TIMEOUT;
-	gk20a->timeouts_enabled = true;
+	gk20a->timeouts_disabled_by_user = false;
+	nvgpu_atomic_set(&gk20a->timeouts_disabled_refcount, 0);
 
 	vgpu_create_sysfs(dev);
 	gk20a_init_gr(gk20a);


@@ -1158,7 +1158,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 		}
 	}
 
-	if (!c->g->timeouts_enabled || !c->timeout.enabled)
+	if (!nvgpu_is_timeouts_enabled(c->g) || !c->timeout.enabled)
 		acquire_timeout = 0;
 	else
 		acquire_timeout = c->timeout.limit_ms;
@@ -1266,7 +1266,7 @@ bool gk20a_channel_update_and_check_timeout(struct channel_gk20a *ch,
 	ch->timeout_gpfifo_get = gpfifo_get;
 
-	return ch->g->timeouts_enabled &&
+	return nvgpu_is_timeouts_enabled(ch->g) &&
 		ch->timeout_accumulated_ms > ch->timeout_ms_max;
 }
@@ -1303,7 +1303,7 @@ static void __gk20a_channel_timeout_start(struct channel_gk20a *ch)
  */
 static void gk20a_channel_timeout_start(struct channel_gk20a *ch)
 {
-	if (!ch->g->timeouts_enabled)
+	if (!nvgpu_is_timeouts_enabled(ch->g))
 		return;
 
 	if (!ch->timeout.enabled)


@@ -2482,7 +2482,7 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
 			val &= ~pbdma_acquire_timeout_en_enable_f();
 			gk20a_writel(g, pbdma_acquire_r(pbdma_id), val);
 
-			if (g->timeouts_enabled) {
+			if (nvgpu_is_timeouts_enabled(g)) {
 				rc_type = RC_TYPE_PBDMA_FAULT;
 				nvgpu_err(g,
 					"semaphore acquire timeout!");


@@ -1314,7 +1314,7 @@ struct gk20a {
 	struct railgate_stats pstats;
 #endif
 	u32 gr_idle_timeout_default;
-	bool timeouts_enabled;
+	bool timeouts_disabled_by_user;
 	unsigned int ch_wdt_timeout_ms;
 
 	struct nvgpu_mutex poweron_lock;
@@ -1376,7 +1376,8 @@ struct gk20a {
 	/* also prevents debug sessions from attaching until released */
 	struct nvgpu_mutex dbg_sessions_lock;
 	int dbg_powergating_disabled_refcount; /*refcount for pg disable */
-	int dbg_timeout_disabled_refcount; /*refcount for timeout disable */
+	/*refcount for timeout disable */
+	nvgpu_atomic_t timeouts_disabled_refcount;
 
 	/* must have dbg_sessions_lock before use */
 	struct nvgpu_dbg_reg_op *dbg_regops_tmp_buf;
@@ -1508,9 +1509,14 @@ struct gk20a {
 	struct nvgpu_list_node boardobjgrp_head;
 };
 
+static inline bool nvgpu_is_timeouts_enabled(struct gk20a *g)
+{
+	return nvgpu_atomic_read(&g->timeouts_disabled_refcount) == 0;
+}
+
 static inline unsigned long gk20a_get_gr_idle_timeout(struct gk20a *g)
 {
-	return g->timeouts_enabled ?
+	return nvgpu_is_timeouts_enabled(g) ?
 		g->gr_idle_timeout_default : ULONG_MAX;
 }