gpu: nvgpu: rename gr_idle_timeout_default to poll_timeout_default

Rename gr_idle_timeout_default to poll_timeout_default

Rename NVGPU_DEFAULT_GR_IDLE_TIMEOUT to
NVGPU_DEFAULT_POLL_TIMEOUT_MS

Rename gk20a_get_gr_idle_timeout to nvgpu_get_poll_timeout

JIRA NVGPU-1313

Change-Id: I17314f0fa4a386f806f6940073649a9082ee21ad
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2083130
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Seema Khowala
Date: 2019-03-27 13:16:56 -07:00
Committed by: mobile promotions
Parent: 9b728a06c9
Commit: a8587d5ee3
37 changed files with 70 additions and 70 deletions
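
For reference, a minimal sketch of the renamed interface, distilled from the hunks below (illustrative only; it assumes the existing struct gk20a and nvgpu_is_timeouts_enabled() declarations from gk20a.h):

    /* Default timeout used for channel watchdog and ctxsw timeout, in ms. */
    #define NVGPU_DEFAULT_POLL_TIMEOUT_MS	3000

    /* Poll timeout in ms; effectively unbounded when timeouts are disabled. */
    static inline u32 nvgpu_get_poll_timeout(struct gk20a *g)
    {
    	return nvgpu_is_timeouts_enabled(g) ?
    		g->poll_timeout_default : U32_MAX;
    }

Callers that previously read gk20a_get_gr_idle_timeout(g) now call nvgpu_get_poll_timeout(g); the default value (3000 ms) is unchanged.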


@@ -1022,7 +1022,7 @@ static int boardobjgrp_pmucmdsend(struct gk20a *g,
 		goto boardobjgrp_pmucmdsend_exit;
 	}
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&handlerparams.success, 1);
 	if (handlerparams.success == 0U) {
 		nvgpu_err(g, "could not process cmd");


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -105,7 +105,7 @@ int gk20a_ce_execute_ops(struct gk20a *g,
 			&ce_ctx->postfences[ce_ctx->cmd_buf_read_queue_offset];
 		ret = gk20a_fence_wait(g, *prev_post_fence,
-				gk20a_get_gr_idle_timeout(g));
+				nvgpu_get_poll_timeout(g));
 		gk20a_fence_put(*prev_post_fence);
 		*prev_post_fence = NULL;


@@ -270,7 +270,7 @@ int gk20a_wait_channel_idle(struct channel_gk20a *ch)
 	bool channel_idle = false;
 	struct nvgpu_timeout timeout;
-	nvgpu_timeout_init(ch->g, &timeout, gk20a_get_gr_idle_timeout(ch->g),
+	nvgpu_timeout_init(ch->g, &timeout, nvgpu_get_poll_timeout(ch->g),
 			   NVGPU_TIMER_CPU_TIMER);
 	do {
@@ -326,7 +326,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 	struct gk20a *g = ch->g;
 	struct fifo_gk20a *f = &g->fifo;
 	struct vm_gk20a *ch_vm = ch->vm;
-	unsigned long timeout = gk20a_get_gr_idle_timeout(g);
+	unsigned long timeout = nvgpu_get_poll_timeout(g);
 	struct dbg_session_gk20a *dbg_s;
 	struct dbg_session_data *session_data, *tmp_s;
 	struct dbg_session_channel_data *ch_data, *tmp;
@@ -742,7 +742,7 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
 	ch->ctxsw_timeout_accumulated_ms = 0;
 	ch->ctxsw_timeout_gpfifo_get = 0;
 	/* set gr host default timeout */
-	ch->ctxsw_timeout_max_ms = gk20a_get_gr_idle_timeout(g);
+	ch->ctxsw_timeout_max_ms = nvgpu_get_poll_timeout(g);
 	ch->ctxsw_timeout_debug_dump = true;
 	ch->unserviceable = false;


@@ -411,7 +411,7 @@ int nvgpu_engine_wait_for_idle(struct gk20a *g)
 	host_num_engines =
 		nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
-	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+	nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	for (i = 0; i < host_num_engines; i++) {


@@ -201,7 +201,7 @@ int gk20a_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
 	u32 delay = GR_IDLE_CHECK_DEFAULT;
 	int ret = -ETIMEDOUT;
-	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+	nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	do {


@@ -78,7 +78,7 @@ int tu104_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
 	u32 delay = GR_IDLE_CHECK_DEFAULT;
 	int ret = -ETIMEDOUT;
-	ret = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+	ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	if (ret != 0) {
 		return ret;


@@ -120,7 +120,7 @@ void nvgpu_gr_flush_channel_tlb(struct gk20a *g)
 u32 nvgpu_gr_get_idle_timeout(struct gk20a *g)
 {
 	return nvgpu_is_timeouts_enabled(g) ?
-		g->gr_idle_timeout_default : UINT_MAX;
+		g->poll_timeout_default : UINT_MAX;
 }
 int nvgpu_gr_init_fs_state(struct gk20a *g)


@@ -120,12 +120,12 @@ static int __nvgpu_vidmem_do_clear_all(struct gk20a *g)
 	struct nvgpu_timeout timeout;
 	nvgpu_timeout_init(g, &timeout,
-			   gk20a_get_gr_idle_timeout(g),
+			   nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	do {
 		err = gk20a_fence_wait(g, gk20a_fence_out,
-				       gk20a_get_gr_idle_timeout(g));
+				       nvgpu_get_poll_timeout(g));
 	} while (err == -ERESTARTSYS &&
 		 !nvgpu_timeout_expired(&timeout));
@@ -463,12 +463,12 @@ int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
 	struct nvgpu_timeout timeout;
 	nvgpu_timeout_init(g, &timeout,
-			   gk20a_get_gr_idle_timeout(g),
+			   nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	do {
 		err = gk20a_fence_wait(g, gk20a_last_fence,
-				       gk20a_get_gr_idle_timeout(g));
+				       nvgpu_get_poll_timeout(g));
 	} while (err == -ERESTARTSYS &&
 		 !nvgpu_timeout_expired(&timeout));


@@ -89,7 +89,7 @@ int nvgpu_nvlink_minion_load(struct gk20a *g)
 		goto exit;
 	}
-	err = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+	err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	if (err != 0) {
 		nvgpu_err(g, "Minion boot timeout init failed");


@@ -115,7 +115,7 @@ int clk_pmu_freq_effective_avg_load(struct gk20a *g, bool bload)
 	}
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&handler.success, 1);
 	if (handler.success == 0U) {
 		nvgpu_err(g, "rpc call to load Effective avg clk domain freq failed");
@@ -176,7 +176,7 @@ int clk_freq_effective_avg(struct gk20a *g, u32 *freqkHz, u32 clkDomainMask) {
 	}
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&handler.success, 1);
 	if (handler.success == 0U) {
 		nvgpu_err(g, "rpc call to get clk frequency average failed");
@@ -283,7 +283,7 @@ int clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx)
 	}
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&handler.success, 1);
 	if (handler.success == 0U) {
@@ -346,7 +346,7 @@ int nvgpu_clk_pmu_vin_load(struct gk20a *g)
 	}
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&handler.success, 1);
 	if (handler.success == 0U) {
@@ -408,7 +408,7 @@ int nvgpu_clk_pmu_clk_domains_load(struct gk20a *g)
 	}
 	(void) pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&handler.success, 1);
 	if (handler.success == 0U) {
@@ -558,7 +558,7 @@ static int clk_pmu_vf_inject(struct gk20a *g,
 	}
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&handler.success, 1);
 	if (handler.success == 0U) {


@@ -274,7 +274,7 @@ int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate)
 			PMU_COMMAND_QUEUE_HPQ,
 			nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq);
-	pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
+	pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g),
 			&ack_status, 1);
 	if (ack_status == 0U) {
 		status = -EINVAL;
@@ -310,7 +310,7 @@ int nvgpu_lpwr_post_init(struct gk20a *g)
 			PMU_COMMAND_QUEUE_LPQ,
 			nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq);
-	pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
+	pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g),
 			&ack_status, 1);
 	if (ack_status == 0U) {
 		status = -EINVAL;


@@ -98,7 +98,7 @@ static int rppg_send_cmd(struct gk20a *g, struct nv_pmu_rppg_cmd *prppg_cmd)
 	}
 	if (prppg_cmd->cmn.cmd_id == NV_PMU_RPPG_CMD_ID_INIT_CTRL) {
-		pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
+		pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g),
 				&success, 1);
 		if (success == 0U) {
 			status = -EINVAL;


@@ -334,7 +334,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 	/* wait if on_pending */
 	else if (pmu->pmu_pg.elpg_stat == PMU_ELPG_STAT_ON_PENDING) {
-		pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
+		pmu_wait_message_cond(pmu, nvgpu_get_poll_timeout(g),
 				&pmu->pmu_pg.elpg_stat, PMU_ELPG_STAT_ON);
 		if (pmu->pmu_pg.elpg_stat != PMU_ELPG_STAT_ON) {
@@ -395,7 +395,7 @@ int nvgpu_pmu_disable_elpg(struct gk20a *g)
 	}
 	pmu_wait_message_cond(pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			ptr, PMU_ELPG_STAT_OFF);
 	if (*ptr != PMU_ELPG_STAT_OFF) {
 		nvgpu_err(g, "ELPG_DISALLOW_ACK failed");


@@ -136,7 +136,7 @@ static int pmgr_pmu_set_object(struct gk20a *g,
 	}
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&handlerparams.success, 1);
 	if (handlerparams.success == 0U) {
@@ -439,7 +439,7 @@ int pmgr_pmu_pwr_devices_query_blocking(
 	}
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&handlerparams.success, 1);
 	if (handlerparams.success == 0U) {
@@ -483,7 +483,7 @@ static int pmgr_pmu_load_blocking(struct gk20a *g)
 	}
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&handlerparams.success, 1);
 	if (handlerparams.success == 0U) {


@@ -283,7 +283,7 @@ int nvgpu_init_pmu_support(struct gk20a *g)
 	 * hitting breakpoint due to PMU halt
 	 */
 	err = nvgpu_falcon_clear_halt_intr_status(&g->pmu.flcn,
-			gk20a_get_gr_idle_timeout(g));
+			nvgpu_get_poll_timeout(g));
 	if (err != 0) {
 		goto exit;
 	}
@@ -639,7 +639,7 @@ int nvgpu_pmu_wait_ready(struct gk20a *g)
 	int status = 0;
 	status = pmu_wait_message_cond_status(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&g->pmu.pmu_ready, (u8)true);
 	if (status != 0) {
 		nvgpu_err(g, "PMU is not ready yet");


@@ -575,7 +575,7 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
 	gk20a_dbg_pmu(g, "cmd post ZBC_TABLE_UPDATE");
 	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
 			   pmu_handle_zbc_msg, pmu, &seq);
-	pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
+	pmu_wait_message_cond(pmu, nvgpu_get_poll_timeout(g),
 			      &pmu->pmu_pg.zbc_save_done, 1);
 	if (!pmu->pmu_pg.zbc_save_done) {
 		nvgpu_err(g, "ZBC save timeout");


@@ -232,7 +232,7 @@ int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 {
 	int err = 0;
 	u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
-	u32 timeout = gk20a_get_gr_idle_timeout(g);
+	u32 timeout = nvgpu_get_poll_timeout(g);
 	/* GM20B PMU supports loading FECS only */
 	if (!(falconidmask == BIT32(FALCON_ID_FECS))) {
@@ -241,7 +241,7 @@ int gm20b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 	/* check whether pmu is ready to bootstrap lsf if not wait for it */
 	if (!g->pmu_lsf_pmu_wpr_init_done) {
 		pmu_wait_message_cond(&g->pmu,
-				gk20a_get_gr_idle_timeout(g),
+				nvgpu_get_poll_timeout(g),
 				&g->pmu_lsf_pmu_wpr_init_done, 1);
 		/* check again if it still not ready indicate an error */
 		if (!g->pmu_lsf_pmu_wpr_init_done) {


@@ -156,7 +156,7 @@ int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 	/* check whether pmu is ready to bootstrap lsf if not wait for it */
 	if (!g->pmu_lsf_pmu_wpr_init_done) {
 		pmu_wait_message_cond(&g->pmu,
-				gk20a_get_gr_idle_timeout(g),
+				nvgpu_get_poll_timeout(g),
 				&g->pmu_lsf_pmu_wpr_init_done, 1);
 		/* check again if it still not ready indicate an error */
 		if (!g->pmu_lsf_pmu_wpr_init_done) {
@@ -168,7 +168,7 @@ int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 	gp106_pmu_load_multiple_falcons(g, falconidmask, flags);
 	nvgpu_assert(falconidmask < U32(U8_MAX));
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&g->pmu_lsf_loaded_falcon_id, U8(falconidmask));
 	if (g->pmu_lsf_loaded_falcon_id != falconidmask) {
 		return -ETIMEDOUT;


@@ -187,7 +187,7 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 	/* check whether pmu is ready to bootstrap lsf if not wait for it */
 	if (!g->pmu_lsf_pmu_wpr_init_done) {
 		pmu_wait_message_cond(&g->pmu,
-				gk20a_get_gr_idle_timeout(g),
+				nvgpu_get_poll_timeout(g),
 				&g->pmu_lsf_pmu_wpr_init_done, 1);
 		/* check again if it still not ready indicate an error */
 		if (!g->pmu_lsf_pmu_wpr_init_done) {
@@ -199,7 +199,7 @@ int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 	gp10b_pmu_load_multiple_falcons(g, falconidmask, flags);
 	nvgpu_assert(falconidmask <= U8_MAX);
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&g->pmu_lsf_loaded_falcon_id, (u8)falconidmask);
 	if (g->pmu_lsf_loaded_falcon_id != falconidmask) {
 		return -ETIMEDOUT;


@@ -1,7 +1,7 @@
 /*
  * GV100 PMU
  *
- * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -67,7 +67,7 @@ int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 	/* check whether pmu is ready to bootstrap lsf if not wait for it */
 	if (!g->pmu_lsf_pmu_wpr_init_done) {
 		pmu_wait_message_cond(&g->pmu,
-				gk20a_get_gr_idle_timeout(g),
+				nvgpu_get_poll_timeout(g),
 				&g->pmu_lsf_pmu_wpr_init_done, 1);
 		/* check again if it still not ready indicate an error */
 		if (!g->pmu_lsf_pmu_wpr_init_done) {
@@ -90,7 +90,7 @@ int gv100_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
 		goto exit;
 	}
-	pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
+	pmu_wait_message_cond(&g->pmu, nvgpu_get_poll_timeout(g),
 			&g->pmu_lsf_loaded_falcon_id, 1);
 	if (g->pmu_lsf_loaded_falcon_id != 1U) {


@@ -1459,7 +1459,7 @@ int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
 	 */
 	if (is_copy_back) {
 		/* wait till RPC execute in PMU & ACK */
-		pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
+		pmu_wait_message_cond(pmu, nvgpu_get_poll_timeout(g),
 				&rpc_payload->complete, 1);
 		/* copy back data to caller */
 		nvgpu_memcpy((u8 *)rpc, (u8 *)rpc_buff, size_rpc);


@@ -490,7 +490,7 @@ int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu)
nvgpu_err(g, "Failed to execute RPC, status=0x%x", status); nvgpu_err(g, "Failed to execute RPC, status=0x%x", status);
} }
pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g), pmu_wait_message_cond(pmu, nvgpu_get_poll_timeout(g),
&pmu->perfmon_query, 1); &pmu->perfmon_query, 1);
return status; return status;


@@ -109,7 +109,7 @@ static int therm_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
 	handlerparams = (struct therm_pmucmdhandler_params*)cb_param;
 	pmu_wait_message_cond(&g->pmu,
-			gk20a_get_gr_idle_timeout(g),
+			nvgpu_get_poll_timeout(g),
 			&handlerparams->success, 1);
 	if (handlerparams->success == 0U) {


@@ -257,7 +257,7 @@ static void sec2_load_ls_falcons(struct gk20a *g, struct nvgpu_sec2 *sec2,
nvgpu_err(g, "command post failed"); nvgpu_err(g, "command post failed");
} }
err = nvgpu_sec2_wait_message_cond(sec2, gk20a_get_gr_idle_timeout(g), err = nvgpu_sec2_wait_message_cond(sec2, nvgpu_get_poll_timeout(g),
&command_ack, true); &command_ack, true);
if (err != 0) { if (err != 0) {
nvgpu_err(g, "command ack receive failed"); nvgpu_err(g, "command ack receive failed");
@@ -274,7 +274,7 @@ int nvgpu_sec2_bootstrap_ls_falcons(struct gk20a *g, struct nvgpu_sec2 *sec2,
nvgpu_log_fn(g, " "); nvgpu_log_fn(g, " ");
nvgpu_sec2_dbg(g, "Check SEC2 RTOS is ready else wait"); nvgpu_sec2_dbg(g, "Check SEC2 RTOS is ready else wait");
err = nvgpu_sec2_wait_message_cond(&g->sec2, gk20a_get_gr_idle_timeout(g), err = nvgpu_sec2_wait_message_cond(&g->sec2, nvgpu_get_poll_timeout(g),
&g->sec2.sec2_ready, true); &g->sec2.sec2_ready, true);
if (err != 0){ if (err != 0){
nvgpu_err(g, "SEC2 RTOS not ready yet, failed to bootstrap flcn %d", nvgpu_err(g, "SEC2 RTOS not ready yet, failed to bootstrap flcn %d",


@@ -319,7 +319,7 @@ int gr_gk20a_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
 		delay = GR_IDLE_CHECK_DEFAULT;
 	}
-	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+	nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	while (check == WAIT_UCODE_LOOP) {
@@ -5058,7 +5058,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
 	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
 		"GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm);
-	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+	nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	/* wait for the sm to lock down */


@@ -79,7 +79,7 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
 		}
 	}
-	ret = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+	ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	if (ret != 0) {
 		nvgpu_err(g, "timeout init failed err=%d", ret);


@@ -132,7 +132,7 @@ int gp106_bios_devinit(struct gk20a *g)
 	}
 	err = nvgpu_falcon_clear_halt_intr_status(&g->pmu.flcn,
-			gk20a_get_gr_idle_timeout(g));
+			nvgpu_get_poll_timeout(g));
 	if (err != 0) {
 		nvgpu_err(g, "falcon_clear_halt_intr_status failed %d", err);
 		goto out;
@@ -191,7 +191,7 @@ int gp106_bios_preos(struct gk20a *g)
 	}
 	err = nvgpu_falcon_clear_halt_intr_status(&g->pmu.flcn,
-			gk20a_get_gr_idle_timeout(g));
+			nvgpu_get_poll_timeout(g));
 	if (err != 0) {
 		nvgpu_err(g, "falcon_clear_halt_intr_status failed %d", err);
 		goto out;


@@ -1519,7 +1519,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
 	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr,
 		"CILP preempt pending, waiting %u msecs for preemption",
-		gk20a_get_gr_idle_timeout(g));
+		nvgpu_get_poll_timeout(g));
 	tsg = tsg_gk20a_from_ch(cilp_preempt_pending_ch);
 	if (tsg == NULL) {
@@ -1529,7 +1529,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
 	gr_ctx = tsg->gr_ctx;
-	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+	nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	do {
 		if (!gr_ctx->cilp_preempt_pending) {


@@ -2848,7 +2848,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
 	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
 		"GPC%d TPC%d: locking down SM%d", gpc, tpc, sm);
-	err = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+	err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	if (err != 0) {
 		nvgpu_err(g, "timeout_init failed: %d", err);


@@ -361,7 +361,7 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 	struct nvgpu_timeout timeout;
 	u32 delay = GR_IDLE_CHECK_DEFAULT;
-	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+	nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	reg_val &= (~(fb_mmu_fault_buffer_size_enable_m()));


@@ -105,7 +105,7 @@ static int gv100_nvlink_minion_command_complete(struct gk20a *g, u32 link_id)
 	u32 delay = GR_IDLE_CHECK_DEFAULT;
 	int err = 0;
-	err = nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+	err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
 			   NVGPU_TIMER_CPU_TIMER);
 	if (err != 0) {
 		nvgpu_err(g, "Minion cmd complete timeout init failed");


@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -26,7 +26,7 @@
 /*
  * Default timeout used for channel watchdog and ctxsw timeout.
  */
-#define NVGPU_DEFAULT_GR_IDLE_TIMEOUT	3000
+#define NVGPU_DEFAULT_POLL_TIMEOUT_MS	3000
 #define NVGPU_DEFAULT_RAILGATE_IDLE_TIMEOUT	500


@@ -1905,7 +1905,7 @@ struct gk20a {
 #ifdef CONFIG_DEBUG_FS
 	struct railgate_stats pstats;
 #endif
-	u32 gr_idle_timeout_default;
+	u32 poll_timeout_default;
 	bool timeouts_disabled_by_user;
 	unsigned int ch_wdt_init_limit_ms;
@@ -2140,10 +2140,10 @@ static inline bool nvgpu_is_timeouts_enabled(struct gk20a *g)
 	return nvgpu_atomic_read(&g->timeouts_disabled_refcount) == 0;
 }
-static inline u32 gk20a_get_gr_idle_timeout(struct gk20a *g)
+static inline u32 nvgpu_get_poll_timeout(struct gk20a *g)
 {
 	return nvgpu_is_timeouts_enabled(g) ?
-		g->gr_idle_timeout_default : UINT_MAX;
+		g->poll_timeout_default : U32_MAX;
 }
 #define MULTICHAR_TAG(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))


@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2017-2019 NVIDIA Corporation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -372,10 +372,10 @@ void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink)
 					l->debugfs,
 					&g->mm.ltc_enabled_target);
-	l->debugfs_gr_idle_timeout_default =
-			debugfs_create_u32("gr_idle_timeout_default_us",
+	l->debugfs_poll_timeout_default =
+			debugfs_create_u32("poll_timeout_default_ms",
 					S_IRUGO|S_IWUSR, l->debugfs,
-					&g->gr_idle_timeout_default);
+					&g->poll_timeout_default);
 	l->debugfs_timeouts_enabled =
 			debugfs_create_file("timeouts_enabled",
 					S_IRUGO|S_IWUSR,


@@ -116,11 +116,11 @@ static void nvgpu_init_timeout(struct gk20a *g)
 	nvgpu_atomic_set(&g->timeouts_disabled_refcount, 0);
 	if (nvgpu_platform_is_silicon(g)) {
-		g->gr_idle_timeout_default = NVGPU_DEFAULT_GR_IDLE_TIMEOUT;
+		g->poll_timeout_default = NVGPU_DEFAULT_POLL_TIMEOUT_MS;
 	} else if (nvgpu_platform_is_fpga(g)) {
-		g->gr_idle_timeout_default = GK20A_TIMEOUT_FPGA;
+		g->poll_timeout_default = GK20A_TIMEOUT_FPGA;
 	} else {
-		g->gr_idle_timeout_default = (u32)ULONG_MAX;
+		g->poll_timeout_default = (u32)ULONG_MAX;
 	}
 	g->ch_wdt_init_limit_ms = platform->ch_wdt_init_limit_ms;
 	g->ctxsw_timeout_period_ms = CTXSW_TIMEOUT_PERIOD_MS;


@@ -144,7 +144,7 @@ struct nvgpu_os_linux {
 	struct dentry *debugfs_ltc_enabled;
 	struct dentry *debugfs_timeouts_enabled;
-	struct dentry *debugfs_gr_idle_timeout_default;
+	struct dentry *debugfs_poll_timeout_default;
 	struct dentry *debugfs_disable_bigpage;
 	struct dentry *debugfs_timeslice_low_priority_us;


@@ -428,7 +428,7 @@ int vgpu_probe(struct platform_device *pdev)
 	dma_set_mask(dev, platform->dma_mask);
 	dma_set_coherent_mask(dev, platform->dma_mask);
-	gk20a->gr_idle_timeout_default = NVGPU_DEFAULT_GR_IDLE_TIMEOUT;
+	gk20a->poll_timeout_default = NVGPU_DEFAULT_POLL_TIMEOUT_MS;
 	gk20a->timeouts_disabled_by_user = false;
 	nvgpu_atomic_set(&gk20a->timeouts_disabled_refcount, 0);