gpu: nvgpu: polling loops should not use gr idle timeouts

Rename GR_IDLE_CHECK_DEFAULT to POLL_DELAY_MIN_US
Rename GR_IDLE_CHECK_MAX to POLL_DELAY_MAX_US

JIRA NVGPU-1313

Change-Id: I1f645cbbc49298f9afdeb3a3d5e61a75d11b7c25
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2083167
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Seema Khowala authored on 2019-03-27 13:44:41 -07:00; committed by mobile promotions
commit b7835b5ead, parent a8587d5ee3
16 changed files with 35 additions and 34 deletions
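Every hunk below is the same exponential-backoff poll pattern: check a condition, sleep, double the delay, cap it at the maximum, and give up once the overall timeout expires. What follows is a minimal userspace sketch of that pattern built around the renamed constants; check_condition(), elapsed_ms() and poll_with_backoff() are hypothetical stand-ins for illustration, not the driver's nvgpu_timeout_*() / nvgpu_usleep_range() helpers.

/* Minimal sketch of the poll-loop shape shared by the hunks below. */
#include <errno.h>
#include <stdbool.h>
#include <time.h>
#include <unistd.h>

#define POLL_DELAY_MIN_US 10U	/* usec; value matches the new define added by this commit */
#define POLL_DELAY_MAX_US 200U	/* usec; value matches the new define added by this commit */

/* Hypothetical stand-in for the hardware status read being polled. */
static bool check_condition(void)
{
	return false;
}

/* Milliseconds elapsed since *start, using a monotonic clock. */
static long long elapsed_ms(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000LL +
	       (now.tv_nsec - start->tv_nsec) / 1000000LL;
}

static int poll_with_backoff(unsigned int timeout_ms)
{
	unsigned int delay = POLL_DELAY_MIN_US;
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (check_condition())
			return 0;

		usleep(delay);		/* the driver sleeps with nvgpu_usleep_range(delay, delay * 2U) */
		delay <<= 1;		/* exponential backoff... */
		if (delay > POLL_DELAY_MAX_US)
			delay = POLL_DELAY_MAX_US;	/* ...capped, like min_t(u32, delay << 1, POLL_DELAY_MAX_US) */
	} while (elapsed_ms(&start) < (long long)timeout_ms);

	return -ETIMEDOUT;		/* condition never held within timeout_ms */
}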


@@ -401,7 +401,7 @@ int nvgpu_engine_disable_activity_all(struct gk20a *g,
int nvgpu_engine_wait_for_idle(struct gk20a *g)
{
struct nvgpu_timeout timeout;
-u32 delay = GR_IDLE_CHECK_DEFAULT;
+u32 delay = POLL_DELAY_MIN_US;
int ret = 0;
u32 i, host_num_engines;
struct nvgpu_engine_status_info engine_status;
@@ -426,7 +426,7 @@ int nvgpu_engine_wait_for_idle(struct gk20a *g)
nvgpu_usleep_range(delay, delay * 2U);
delay = min_t(u32,
-delay << 1, GR_IDLE_CHECK_MAX);
+delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) {


@@ -198,7 +198,7 @@ void gk20a_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
int gk20a_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
{
struct nvgpu_timeout timeout;
-u32 delay = GR_IDLE_CHECK_DEFAULT;
+u32 delay = POLL_DELAY_MIN_US;
int ret = -ETIMEDOUT;
nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
@@ -212,7 +212,7 @@ int gk20a_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
}
nvgpu_usleep_range(delay, delay * 2U);
-delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) {


@@ -75,7 +75,7 @@ void tu104_runlist_hw_submit(struct gk20a *g, u32 runlist_id,
int tu104_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
{
struct nvgpu_timeout timeout;
-u32 delay = GR_IDLE_CHECK_DEFAULT;
+u32 delay = POLL_DELAY_MIN_US;
int ret = -ETIMEDOUT;
ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
@@ -93,7 +93,7 @@ int tu104_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
}
nvgpu_usleep_range(delay, delay * 2U);
-delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
return ret;


@@ -50,7 +50,7 @@ int nvgpu_nvlink_minion_load(struct gk20a *g)
int err = 0;
struct nvgpu_firmware *nvgpu_minion_fw = NULL;
struct nvgpu_timeout timeout;
-u32 delay = GR_IDLE_CHECK_DEFAULT;
+u32 delay = POLL_DELAY_MIN_US;
bool boot_cmplte;
nvgpu_log_fn(g, " ");
@@ -107,7 +107,7 @@ int nvgpu_nvlink_minion_load(struct gk20a *g)
}
nvgpu_usleep_range(delay, delay * 2U);
delay = min_t(unsigned int,
-delay << 1, GR_IDLE_CHECK_MAX);
+delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired_msg(&timeout,
"minion boot timeout") == 0);


@@ -1226,7 +1226,7 @@ int pmu_wait_message_cond_status(struct nvgpu_pmu *pmu, u32 timeout_ms,
struct gk20a *g = gk20a_from_pmu(pmu);
struct nvgpu_timeout timeout;
int err;
-unsigned int delay = GR_IDLE_CHECK_DEFAULT;
+unsigned int delay = POLL_DELAY_MIN_US;
err = nvgpu_timeout_init(g, &timeout, timeout_ms,
NVGPU_TIMER_CPU_TIMER);
@@ -1247,7 +1247,7 @@ int pmu_wait_message_cond_status(struct nvgpu_pmu *pmu, u32 timeout_ms,
}
nvgpu_usleep_range(delay, delay * 2U);
-delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
return -ETIMEDOUT;


@@ -435,7 +435,7 @@ int nvgpu_sec2_wait_message_cond(struct nvgpu_sec2 *sec2, u32 timeout_ms,
{
struct gk20a *g = sec2->g;
struct nvgpu_timeout timeout;
-unsigned long delay = GR_IDLE_CHECK_DEFAULT;
+unsigned long delay = POLL_DELAY_MIN_US;
nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
@@ -449,7 +449,7 @@ int nvgpu_sec2_wait_message_cond(struct nvgpu_sec2 *sec2, u32 timeout_ms,
}
nvgpu_usleep_range(delay, delay * 2U);
-delay = min_t(u32, delay << 1U, GR_IDLE_CHECK_MAX);
+delay = min_t(u32, delay << 1U, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
return -ETIMEDOUT;


@@ -1238,7 +1238,7 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
unsigned int id_type)
{
struct nvgpu_timeout timeout;
-u32 delay = GR_IDLE_CHECK_DEFAULT;
+u32 delay = POLL_DELAY_MIN_US;
int ret = -EBUSY;
nvgpu_timeout_init(g, &timeout, gk20a_fifo_get_preempt_timeout(g),
@@ -1251,7 +1251,7 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
}
nvgpu_usleep_range(delay, delay * 2U);
-delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) {


@@ -316,7 +316,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
nvgpu_log_fn(g, " ");
if (sleepduringwait) {
-delay = GR_IDLE_CHECK_DEFAULT;
+delay = POLL_DELAY_MIN_US;
}
nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
@@ -408,7 +408,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
if (sleepduringwait) {
nvgpu_usleep_range(delay, delay * 2U);
-delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
} else {
nvgpu_udelay(delay);
}
@@ -5047,7 +5047,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
{
bool locked_down;
bool no_error_pending;
-u32 delay = GR_IDLE_CHECK_DEFAULT;
+u32 delay = POLL_DELAY_MIN_US;
bool mmu_debug_mode_enabled = g->ops.fb.is_debug_mode_enabled(g);
u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
u32 dbgr_status0 = 0, dbgr_control0 = 0;
@@ -5097,7 +5097,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
}
nvgpu_usleep_range(delay, delay * 2U);
-delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
dbgr_control0 = gk20a_readl(g,


@@ -32,8 +32,6 @@
#include <nvgpu/comptags.h>
#include <nvgpu/cond.h>
-#define GR_IDLE_CHECK_DEFAULT 10U /* usec */
-#define GR_IDLE_CHECK_MAX 200U /* usec */
#define GR_FECS_POLL_INTERVAL 5U /* usec */
#define INVALID_MAX_WAYS 0xFFFFFFFFU


@@ -60,7 +60,7 @@ static inline u32 gm20b_engine_id_to_mmu_id(struct gk20a *g, u32 engine_id)
void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
unsigned long engine_ids)
{
-unsigned long delay = GR_IDLE_CHECK_DEFAULT;
+unsigned long delay = POLL_DELAY_MIN_US;
unsigned long engine_id;
int ret;
struct nvgpu_timeout timeout;
@@ -95,7 +95,7 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
}
nvgpu_usleep_range(delay, delay * 2UL);
-delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) {


@@ -1468,7 +1468,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
struct dbg_session_gk20a *dbg_s,
int *ctx_resident_ch_fd)
{
-u32 delay = GR_IDLE_CHECK_DEFAULT;
+u32 delay = POLL_DELAY_MIN_US;
bool cilp_preempt_pending = false;
struct channel_gk20a *cilp_preempt_pending_ch = NULL;
struct channel_gk20a *ch;
@@ -1537,7 +1537,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
}
nvgpu_usleep_range(delay, delay * 2U);
-delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
/* If cilp is still pending at this point, timeout */


@@ -149,7 +149,7 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
u32 pbdma_id)
{
struct nvgpu_timeout timeout;
-unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */
+unsigned long delay = POLL_DELAY_MIN_US; /* in micro seconds */
int ret;
unsigned int loop_count = 0;
struct nvgpu_pbdma_status_info pbdma_status;
@@ -225,7 +225,7 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
nvgpu_usleep_range(delay, delay * 2UL);
delay = min_t(unsigned long,
-delay << 1, GR_IDLE_CHECK_MAX);
+delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) {
@@ -240,7 +240,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
u32 act_eng_id, u32 *reset_eng_bitmask)
{
struct nvgpu_timeout timeout;
-unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */
+unsigned long delay = POLL_DELAY_MIN_US; /* in micro seconds */
u32 eng_stat;
u32 ctx_stat;
int ret;
@@ -359,7 +359,7 @@ static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id,
}
nvgpu_usleep_range(delay, delay * 2UL);
delay = min_t(unsigned long,
-delay << 1, GR_IDLE_CHECK_MAX);
+delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) {


@@ -2835,7 +2835,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
{
bool locked_down;
bool no_error_pending;
-u32 delay = GR_IDLE_CHECK_DEFAULT;
+u32 delay = POLL_DELAY_MIN_US;
bool mmu_debug_mode_enabled = g->ops.fb.is_debug_mode_enabled(g);
u32 dbgr_status0 = 0;
u32 warp_esr, global_esr;
@@ -2915,7 +2915,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
}
nvgpu_usleep_range(delay, delay * 2U);
-delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired(&timeout) == 0);
nvgpu_err(g, "GPC%d TPC%d: timed out while trying to "


@@ -359,7 +359,7 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
} else {
struct nvgpu_timeout timeout;
-u32 delay = GR_IDLE_CHECK_DEFAULT;
+u32 delay = POLL_DELAY_MIN_US;
nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
@@ -383,7 +383,7 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
fault_status = g->ops.fb.read_mmu_fault_status(g);
nvgpu_usleep_range(delay, delay * 2U);
-delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+delay = min_t(u32, delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired_msg(&timeout,
"fault status busy set") == 0);
}


@@ -102,7 +102,7 @@ static int gv100_nvlink_minion_command_complete(struct gk20a *g, u32 link_id)
{
u32 reg;
struct nvgpu_timeout timeout;
-u32 delay = GR_IDLE_CHECK_DEFAULT;
+u32 delay = POLL_DELAY_MIN_US;
int err = 0;
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
@@ -134,7 +134,7 @@ static int gv100_nvlink_minion_command_complete(struct gk20a *g, u32 link_id)
}
nvgpu_usleep_range(delay, delay * 2U);
delay = min_t(unsigned int,
-delay << 1, GR_IDLE_CHECK_MAX);
+delay << 1, POLL_DELAY_MAX_US);
} while (nvgpu_timeout_expired_msg(&timeout,
"minion cmd timeout") == 0);


@@ -2140,6 +2140,9 @@ static inline bool nvgpu_is_timeouts_enabled(struct gk20a *g)
return nvgpu_atomic_read(&g->timeouts_disabled_refcount) == 0;
}
+#define POLL_DELAY_MIN_US 10U
+#define POLL_DELAY_MAX_US 200U
static inline u32 nvgpu_get_poll_timeout(struct gk20a *g)
{
return nvgpu_is_timeouts_enabled(g) ?