gpu: nvgpu: remove HAL pointer for gk20a_fifo_wait_engine_idle

The corresponding HAL pointer for gk20a_fifo_wait_engine_idle is not
being invoked anywhere, and hence it is removed from the code.

The function gk20a_fifo_wait_engine_idle belongs to the engine unit and is
only called in a non-safe build; hence it is moved to the engine unit and
guarded by the non-safe build flag NVGPU_ENGINE.
Also, gk20a_fifo_wait_engine_idle is renamed to nvgpu_engine_wait_for_idle.

Jira NVGPU-1315

Change-Id: Ie550c7e46a4284dfe368859d828b1994df34185f
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2033631
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Debarshi Dutta
2019-03-06 11:26:29 +05:30
committed by mobile promotions
parent adc27cc9b4
commit 8fae143b57
15 changed files with 59 additions and 68 deletions

View File

@@ -20,6 +20,10 @@
* DEALINGS IN THE SOFTWARE. * DEALINGS IN THE SOFTWARE.
*/ */
#include <nvgpu/log.h>
#include <nvgpu/errno.h>
#include <nvgpu/timers.h>
#include <nvgpu/bitops.h> #include <nvgpu/bitops.h>
#include <nvgpu/pmu.h> #include <nvgpu/pmu.h>
#include <nvgpu/runlist.h> #include <nvgpu/runlist.h>
@@ -392,4 +396,54 @@ int nvgpu_engine_disable_activity_all(struct gk20a *g,
return ret; return ret;
} }
int nvgpu_engine_wait_for_idle(struct gk20a *g)
{
struct nvgpu_timeout timeout;
u32 delay = GR_IDLE_CHECK_DEFAULT;
int ret = 0;
u32 i, host_num_engines;
struct nvgpu_engine_status_info engine_status;
nvgpu_log_fn(g, " ");
host_num_engines =
nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
NVGPU_TIMER_CPU_TIMER);
for (i = 0; i < host_num_engines; i++) {
ret = -ETIMEDOUT;
do {
g->ops.engine_status.read_engine_status_info(g, i,
&engine_status);
if (!engine_status.is_busy) {
ret = 0;
break;
}
nvgpu_usleep_range(delay, delay * 2U);
delay = min_t(u32,
delay << 1, GR_IDLE_CHECK_MAX);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) {
/* possible causes:
* check register settings programmed in hal set by
* elcg_init_idle_filters and init_therm_setup_hw
*/
nvgpu_err(g, "cannot idle engine: %u "
"engine_status: 0x%08x", i,
engine_status.reg_data);
break;
}
}
nvgpu_log_fn(g, "done");
return ret;
}
#endif /* NVGPU_ENGINE */ #endif /* NVGPU_ENGINE */

View File

@@ -446,13 +446,6 @@ int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg)
return err; return err;
} }
int vgpu_fifo_wait_engine_idle(struct gk20a *g)
{
nvgpu_log_fn(g, " ");
return 0;
}
int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch, int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
u32 err_code, bool verbose) u32 err_code, bool verbose)
{ {

View File

@@ -43,7 +43,6 @@ int vgpu_channel_setup_ramfc(struct channel_gk20a *ch, u64 gpfifo_base,
int vgpu_fifo_init_engine_info(struct fifo_gk20a *f); int vgpu_fifo_init_engine_info(struct fifo_gk20a *f);
int vgpu_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch); int vgpu_fifo_preempt_channel(struct gk20a *g, struct channel_gk20a *ch);
int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg); int vgpu_fifo_preempt_tsg(struct gk20a *g, struct tsg_gk20a *tsg);
int vgpu_fifo_wait_engine_idle(struct gk20a *g);
int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice); int vgpu_channel_set_timeslice(struct channel_gk20a *ch, u32 timeslice);
int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch, int vgpu_fifo_force_reset_ch(struct channel_gk20a *ch,
u32 err_code, bool verbose); u32 err_code, bool verbose);

View File

@@ -379,7 +379,6 @@ static const struct gpu_ops vgpu_gp10b_ops = {
.get_mmu_fault_desc = gp10b_fifo_get_mmu_fault_desc, .get_mmu_fault_desc = gp10b_fifo_get_mmu_fault_desc,
.get_mmu_fault_client_desc = gp10b_fifo_get_mmu_fault_client_desc, .get_mmu_fault_client_desc = gp10b_fifo_get_mmu_fault_client_desc,
.get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc, .get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
.wait_engine_idle = vgpu_fifo_wait_engine_idle,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature, .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.tsg_set_timeslice = vgpu_tsg_set_timeslice, .tsg_set_timeslice = vgpu_tsg_set_timeslice,
.tsg_open = vgpu_tsg_open, .tsg_open = vgpu_tsg_open,

View File

@@ -449,7 +449,6 @@ static const struct gpu_ops vgpu_gv11b_ops = {
.get_mmu_fault_desc = NULL, .get_mmu_fault_desc = NULL,
.get_mmu_fault_client_desc = NULL, .get_mmu_fault_client_desc = NULL,
.get_mmu_fault_gpc_desc = NULL, .get_mmu_fault_gpc_desc = NULL,
.wait_engine_idle = vgpu_fifo_wait_engine_idle,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature, .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.tsg_set_timeslice = vgpu_tsg_set_timeslice, .tsg_set_timeslice = vgpu_tsg_set_timeslice,
.tsg_open = vgpu_tsg_open, .tsg_open = vgpu_tsg_open,

View File

@@ -2402,54 +2402,6 @@ bool gk20a_fifo_is_engine_busy(struct gk20a *g)
return false; return false;
} }
int gk20a_fifo_wait_engine_idle(struct gk20a *g)
{
struct nvgpu_timeout timeout;
u32 delay = GR_IDLE_CHECK_DEFAULT;
int ret = 0;
u32 i, host_num_engines;
struct nvgpu_engine_status_info engine_status;
nvgpu_log_fn(g, " ");
host_num_engines =
nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
NVGPU_TIMER_CPU_TIMER);
for (i = 0; i < host_num_engines; i++) {
ret = -ETIMEDOUT;
do {
g->ops.engine_status.read_engine_status_info(g, i,
&engine_status);
if (!engine_status.is_busy) {
ret = 0;
break;
}
nvgpu_usleep_range(delay, delay * 2U);
delay = min_t(u32,
delay << 1, GR_IDLE_CHECK_MAX);
} while (nvgpu_timeout_expired(&timeout) == 0);
if (ret != 0) {
/* possible causes:
* check register settings programmed in hal set by
* elcg_init_idle_filters and init_therm_setup_hw
*/
nvgpu_err(g, "cannot idle engine: %u "
"engine_status: 0x%08x", i,
engine_status.reg_data);
break;
}
}
nvgpu_log_fn(g, "done");
return ret;
}
u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g) u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g)
{ {
return pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f(); return pbdma_signature_hw_valid_f() | pbdma_signature_sw_zero_f();

View File

@@ -278,7 +278,6 @@ int gk20a_fifo_tsg_unbind_channel(struct channel_gk20a *ch);
void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g, void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
unsigned long fault_id); unsigned long fault_id);
int gk20a_fifo_wait_engine_idle(struct gk20a *g);
bool gk20a_fifo_is_engine_busy(struct gk20a *g); bool gk20a_fifo_is_engine_busy(struct gk20a *g);
u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g); u32 gk20a_fifo_get_pbdma_signature(struct gk20a *g);
u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g, u32 gk20a_fifo_get_failing_engine_data(struct gk20a *g,

View File

@@ -515,7 +515,6 @@ static const struct gpu_ops gm20b_ops = {
.get_mmu_fault_desc = gk20a_fifo_get_mmu_fault_desc, .get_mmu_fault_desc = gk20a_fifo_get_mmu_fault_desc,
.get_mmu_fault_client_desc = gk20a_fifo_get_mmu_fault_client_desc, .get_mmu_fault_client_desc = gk20a_fifo_get_mmu_fault_client_desc,
.get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc, .get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_pbdma_signature = gk20a_fifo_get_pbdma_signature, .get_pbdma_signature = gk20a_fifo_get_pbdma_signature,
.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice, .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
.force_reset_ch = gk20a_fifo_force_reset_ch, .force_reset_ch = gk20a_fifo_force_reset_ch,

View File

@@ -568,7 +568,6 @@ static const struct gpu_ops gp10b_ops = {
.get_mmu_fault_desc = gp10b_fifo_get_mmu_fault_desc, .get_mmu_fault_desc = gp10b_fifo_get_mmu_fault_desc,
.get_mmu_fault_client_desc = gp10b_fifo_get_mmu_fault_client_desc, .get_mmu_fault_client_desc = gp10b_fifo_get_mmu_fault_client_desc,
.get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc, .get_mmu_fault_gpc_desc = gm20b_fifo_get_mmu_fault_gpc_desc,
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature, .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice, .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
.force_reset_ch = gk20a_fifo_force_reset_ch, .force_reset_ch = gk20a_fifo_force_reset_ch,

View File

@@ -736,7 +736,6 @@ static const struct gpu_ops gv100_ops = {
.get_mmu_fault_desc = NULL, .get_mmu_fault_desc = NULL,
.get_mmu_fault_client_desc = NULL, .get_mmu_fault_client_desc = NULL,
.get_mmu_fault_gpc_desc = NULL, .get_mmu_fault_gpc_desc = NULL,
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature, .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice, .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
.force_reset_ch = gk20a_fifo_force_reset_ch, .force_reset_ch = gk20a_fifo_force_reset_ch,

View File

@@ -689,7 +689,6 @@ static const struct gpu_ops gv11b_ops = {
.get_mmu_fault_desc = NULL, .get_mmu_fault_desc = NULL,
.get_mmu_fault_client_desc = NULL, .get_mmu_fault_client_desc = NULL,
.get_mmu_fault_gpc_desc = NULL, .get_mmu_fault_gpc_desc = NULL,
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature, .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice, .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
.force_reset_ch = gk20a_fifo_force_reset_ch, .force_reset_ch = gk20a_fifo_force_reset_ch,

View File

@@ -60,4 +60,6 @@ int nvgpu_engine_disable_activity(struct gk20a *g,
int nvgpu_engine_disable_activity_all(struct gk20a *g, int nvgpu_engine_disable_activity_all(struct gk20a *g,
bool wait_for_idle); bool wait_for_idle);
int nvgpu_engine_wait_for_idle(struct gk20a *g);
#endif /*NVGPU_ENGINE_H*/ #endif /*NVGPU_ENGINE_H*/

View File

@@ -787,7 +787,6 @@ struct gpu_ops {
void (*get_mmu_fault_gpc_desc)(struct mmu_fault_info *mmfault); void (*get_mmu_fault_gpc_desc)(struct mmu_fault_info *mmfault);
void (*apply_pb_timeout)(struct gk20a *g); void (*apply_pb_timeout)(struct gk20a *g);
void (*apply_ctxsw_timeout_intr)(struct gk20a *g); void (*apply_ctxsw_timeout_intr)(struct gk20a *g);
int (*wait_engine_idle)(struct gk20a *g);
u32 (*get_pbdma_signature)(struct gk20a *g); u32 (*get_pbdma_signature)(struct gk20a *g);
int (*tsg_set_timeslice)(struct tsg_gk20a *tsg, u32 timeslice); int (*tsg_set_timeslice)(struct tsg_gk20a *tsg, u32 timeslice);
u32 (*default_timeslice_us)(struct gk20a *g); u32 (*default_timeslice_us)(struct gk20a *g);

View File

@@ -969,7 +969,7 @@ int nvgpu_quiesce(struct gk20a *g)
return err; return err;
} }
err = gk20a_fifo_wait_engine_idle(g); err = nvgpu_engine_wait_for_idle(g);
if (err) { if (err) {
nvgpu_err(g, "failed to idle engines, err=%d", nvgpu_err(g, "failed to idle engines, err=%d",
err); err);

View File

@@ -765,7 +765,6 @@ static const struct gpu_ops tu104_ops = {
.get_mmu_fault_desc = NULL, .get_mmu_fault_desc = NULL,
.get_mmu_fault_client_desc = NULL, .get_mmu_fault_client_desc = NULL,
.get_mmu_fault_gpc_desc = NULL, .get_mmu_fault_gpc_desc = NULL,
.wait_engine_idle = gk20a_fifo_wait_engine_idle,
.get_pbdma_signature = gp10b_fifo_get_pbdma_signature, .get_pbdma_signature = gp10b_fifo_get_pbdma_signature,
.tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice, .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice,
.force_reset_ch = gk20a_fifo_force_reset_ch, .force_reset_ch = gk20a_fifo_force_reset_ch,