gpu: nvgpu: simplify nvgpu_timeout_init

nvgpu_timeout_init() returns an error code only when the flags parameter
is invalid. There are very few possible values for flags, so extract the
two most common cases - a CPU clock based and a retry based timeout - to
functions that cannot fail and thus return nothing. Adjust all callers
to use those, simplifying error handling quite a bit.
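
For reference, this is the shape of the conversion at a typical call site
(a minimal sketch; timeout_ms, max_retries and the surrounding context are
hypothetical, the timeout API calls are the real ones):

/* before: every caller had to handle an error that only occurs for bad flags */
err = nvgpu_timeout_init(g, &to, timeout_ms, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
        return err;
}

/* after: the dedicated helper takes a fixed, known-valid flag and cannot fail */
nvgpu_timeout_init_cpu_timer(g, &to, timeout_ms);

/* retry-count based timeouts use the equivalent helper */
nvgpu_timeout_init_retry(g, &to, max_retries);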

Change-Id: I985fe7fa988ebbae25601d15cf57fd48eda0c677
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2613833
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Author: Konsta Hölttä
Date: 2021-10-20 18:01:06 +03:00
Committed by: mobile promotions
Parent: 9b3f3ea4be
Commit: f4ec400d5f
65 changed files with 222 additions and 510 deletions

View File

@@ -93,7 +93,7 @@ int nvgpu_falcon_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout)
{
struct nvgpu_timeout to;
struct gk20a *g;
int status;
int status = 0;
if (!is_falcon_valid(flcn)) {
return -EINVAL;
@@ -101,10 +101,7 @@ int nvgpu_falcon_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout)
g = flcn->g;
status = nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
if (status != 0) {
return status;
}
nvgpu_timeout_init_cpu_timer(g, &to, timeout);
do {
if (g->ops.falcon.is_falcon_cpu_halted(flcn)) {
@@ -125,7 +122,6 @@ int nvgpu_falcon_wait_idle(struct nvgpu_falcon *flcn)
{
struct nvgpu_timeout timeout;
struct gk20a *g;
int status;
if (!is_falcon_valid(flcn)) {
return -EINVAL;
@@ -133,10 +129,7 @@ int nvgpu_falcon_wait_idle(struct nvgpu_falcon *flcn)
g = flcn->g;
status = nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_RETRY_TIMER);
if (status != 0) {
return status;
}
nvgpu_timeout_init_retry(g, &timeout, 2000);
/* wait for falcon idle */
do {
@@ -166,7 +159,7 @@ int nvgpu_falcon_mem_scrub_wait(struct nvgpu_falcon *flcn)
const u32 mem_scrubbing_max_timeout = 1000U;
const u32 mem_scrubbing_default_timeout = 10U;
struct gk20a *g;
int status;
int status = 0;
if (!is_falcon_valid(flcn)) {
return -EINVAL;
@@ -175,13 +168,9 @@ int nvgpu_falcon_mem_scrub_wait(struct nvgpu_falcon *flcn)
g = flcn->g;
/* check IMEM/DMEM scrubbing complete status */
status = nvgpu_timeout_init(g, &timeout,
nvgpu_timeout_init_retry(g, &timeout,
mem_scrubbing_max_timeout /
mem_scrubbing_default_timeout,
NVGPU_TIMER_RETRY_TIMER);
if (status != 0) {
return status;
}
mem_scrubbing_default_timeout);
do {
if (g->ops.falcon.is_falcon_scrubbing_done(flcn)) {
@@ -670,7 +659,7 @@ int nvgpu_falcon_clear_halt_intr_status(struct nvgpu_falcon *flcn,
{
struct nvgpu_timeout to;
struct gk20a *g;
int status;
int status = 0;
if (!is_falcon_valid(flcn)) {
return -EINVAL;
@@ -678,10 +667,7 @@ int nvgpu_falcon_clear_halt_intr_status(struct nvgpu_falcon *flcn,
g = flcn->g;
status = nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
if (status != 0) {
return status;
}
nvgpu_timeout_init_cpu_timer(g, &to, timeout);
do {
if (g->ops.falcon.clear_halt_interrupt_status(flcn)) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -139,15 +139,11 @@ void nvgpu_channel_worker_poll_init(struct nvgpu_worker *worker)
{
struct nvgpu_channel_worker *ch_worker =
nvgpu_channel_worker_from_worker(worker);
int ret;
ch_worker->watchdog_interval = 100U;
ret = nvgpu_timeout_init(worker->g, &ch_worker->timeout,
ch_worker->watchdog_interval, NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(worker->g, "timeout_init failed: %d", ret);
}
nvgpu_timeout_init_cpu_timer(worker->g, &ch_worker->timeout,
ch_worker->watchdog_interval);
}
/**
@@ -176,16 +172,11 @@ void nvgpu_channel_worker_poll_wakeup_post_process_item(
struct nvgpu_channel_worker *ch_worker =
nvgpu_channel_worker_from_worker(worker);
int ret;
if (nvgpu_timeout_peek_expired(&ch_worker->timeout)) {
nvgpu_channel_poll_wdt(g);
ret = nvgpu_timeout_init(g, &ch_worker->timeout,
ch_worker->watchdog_interval,
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(g, "timeout_init failed: %d", ret);
}
nvgpu_timeout_init_cpu_timer(g, &ch_worker->timeout,
ch_worker->watchdog_interval);
}
}

View File

@@ -339,7 +339,7 @@ int nvgpu_engine_wait_for_idle(struct gk20a *g)
{
struct nvgpu_timeout timeout;
u32 delay = POLL_DELAY_MIN_US;
int ret = 0, err = 0;
int ret = 0;
u32 i, host_num_engines;
struct nvgpu_engine_status_info engine_status;
@@ -348,11 +348,7 @@ int nvgpu_engine_wait_for_idle(struct gk20a *g)
host_num_engines =
nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
return -EINVAL;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
for (i = 0; i < host_num_engines; i++) {
if (!nvgpu_engine_check_valid_id(g, i)) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -91,15 +91,8 @@ static void nvgpu_channel_wdt_init(struct nvgpu_channel_wdt *wdt,
struct nvgpu_channel_wdt_state *state)
{
struct gk20a *g = wdt->g;
int ret;
ret = nvgpu_timeout_init(g, &wdt->timer,
wdt->limit_ms,
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(g, "timeout_init failed: %d", ret);
return;
}
nvgpu_timeout_init_cpu_timer(g, &wdt->timer, wdt->limit_ms);
wdt->ch_state = *state;
wdt->running = true;

View File

@@ -79,10 +79,7 @@ static int gsp_write_cmd(struct nvgpu_gsp *gsp,
nvgpu_log_fn(g, " ");
err = nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
do {
err = nvgpu_gsp_queue_push(gsp->queues, queue_id, gsp->gsp_flcn,

View File

@@ -217,7 +217,7 @@ int nvgpu_gsp_wait_message_cond(struct nvgpu_gsp *gsp, u32 timeout_ms,
struct nvgpu_timeout timeout;
u32 delay = POLL_DELAY_MIN_US;
nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
do {
if (*(u8 *)var == val) {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -41,16 +41,12 @@
void nvgpu_vidmem_destroy(struct gk20a *g)
{
struct nvgpu_timeout timeout;
int err;
if (g->ops.fb.get_vidmem_size == NULL) {
return;
}
err = nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
}
nvgpu_timeout_init_retry(g, &timeout, 100);
/*
* Ensure that the thread runs one last time to flush anything in the
@@ -98,13 +94,7 @@ static int nvgpu_vidmem_clear_fence_wait(struct gk20a *g,
bool done;
int err;
err = nvgpu_timeout_init(g, &timeout,
nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
do {
err = nvgpu_fence_wait(g, fence_out,

View File

@@ -1636,12 +1636,7 @@ static int nvgpu_vm_unmap_sync_buffer(struct vm_gk20a *vm,
/*
* 100ms timer.
*/
ret = nvgpu_timeout_init(vm->mm->g, &timeout, 100,
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(vm->mm->g, "timeout_init failed (%d)", ret);
return ret;
}
nvgpu_timeout_init_cpu_timer(vm->mm->g, &timeout, 100);
nvgpu_mutex_release(&vm->update_gmmu_lock);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -98,12 +98,7 @@ int nvgpu_nvlink_minion_load(struct gk20a *g)
goto exit;
}
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "Minion boot timeout init failed");
goto exit;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
do {
err = g->ops.nvlink.minion.is_boot_complete(g, &boot_cmplte);

View File

@@ -163,11 +163,7 @@ int nvgpu_perfbuf_update_get_put(struct gk20a *g, u64 bytes_consumed,
}
if (update_available_bytes && wait && available_bytes_va != NULL) {
err = nvgpu_timeout_init(g, &timeout, 10000, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, 10000);
do {
if (*available_bytes_va != 0xffffffff) {

View File

@@ -108,15 +108,9 @@ int nvgpu_pmu_wait_fw_ack_status(struct gk20a *g, struct nvgpu_pmu *pmu,
u32 timeout_ms, void *var, u8 val)
{
struct nvgpu_timeout timeout;
int err;
unsigned int delay = POLL_DELAY_MIN_US;
err = nvgpu_timeout_init(g, &timeout, timeout_ms,
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "PMU wait timeout init failed.");
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
do {
nvgpu_rmb();

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -158,11 +158,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
nvgpu_log_fn(g, " ");
err = nvgpu_timeout_init(g, &timeout, U32_MAX, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "failed to init timer");
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, U32_MAX);
do {
err = nvgpu_pmu_queue_push(&pmu->queues, pmu->flcn,

View File

@@ -788,7 +788,6 @@ static void pmu_pg_kill_task(struct gk20a *g, struct nvgpu_pmu *pmu,
struct nvgpu_pmu_pg *pg)
{
struct nvgpu_timeout timeout;
int err = 0;
/* make sure the pending operations are finished before we continue */
if (nvgpu_thread_is_running(&pg->pg_init.state_task)) {
@@ -800,12 +799,7 @@ static void pmu_pg_kill_task(struct gk20a *g, struct nvgpu_pmu *pmu,
nvgpu_thread_stop(&pg->pg_init.state_task);
/* wait to confirm thread stopped */
err = nvgpu_timeout_init(g, &timeout, 1000,
NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "timeout_init failed err=%d", err);
return;
}
nvgpu_timeout_init_retry(g, &timeout, 1000);
do {
if (!nvgpu_thread_is_running(&pg->pg_init.state_task)) {
break;

View File

@@ -332,14 +332,11 @@ static int nvgpu_pmu_wait_for_priv_lockdown_release(struct gk20a *g,
struct nvgpu_falcon *flcn, unsigned int timeout)
{
struct nvgpu_timeout to;
int status;
int status = 0;
nvgpu_log_fn(g, " ");
status = nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
if (status != 0) {
return status;
}
nvgpu_timeout_init_cpu_timer(g, &to, timeout);
/* poll for priv lockdown release */
do {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -72,7 +72,7 @@ static int sec2_write_cmd(struct nvgpu_sec2 *sec2,
nvgpu_log_fn(g, " ");
nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
do {
err = nvgpu_sec2_queue_push(sec2->queues, queue_id, &sec2->flcn,

View File

@@ -258,7 +258,7 @@ int nvgpu_sec2_wait_message_cond(struct nvgpu_sec2 *sec2, u32 timeout_ms,
struct nvgpu_timeout timeout;
u32 delay = POLL_DELAY_MIN_US;
nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
do {
if (*(u8 *)var == val) {

View File

@@ -853,13 +853,8 @@ u32 nvgpu_bios_read_u32(struct gk20a *g, u32 offset)
bool nvgpu_bios_wait_for_init_done(struct gk20a *g)
{
struct nvgpu_timeout timeout;
int err;
err = nvgpu_timeout_init(g, &timeout,
NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
return false;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS);
/* Wait till vbios is completed */
do {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -85,14 +85,9 @@ int gv100_bios_preos_wait_for_halt(struct gk20a *g)
g->ops.bus.write_sw_scratch(g, SCRATCH_PMU_EXIT_AND_HALT,
PMU_EXIT_AND_HALT_SET(tmp, PMU_EXIT_AND_HALT_YES));
err = nvgpu_timeout_init(g, &timeout,
nvgpu_timeout_init_retry(g, &timeout,
PMU_BOOT_TIMEOUT_MAX /
PMU_BOOT_TIMEOUT_DEFAULT,
NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "NVGPU timeout init failed");
return err;
}
PMU_BOOT_TIMEOUT_DEFAULT);
do {
progress = g->ops.bus.read_sw_scratch(g,
@@ -180,14 +175,10 @@ int gv100_bios_devinit(struct gk20a *g)
goto out;
}
err = nvgpu_timeout_init(g, &timeout,
nvgpu_timeout_init_retry(g, &timeout,
PMU_BOOT_TIMEOUT_MAX /
PMU_BOOT_TIMEOUT_DEFAULT,
NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu timeout init failed %d", err);
goto out;
}
PMU_BOOT_TIMEOUT_DEFAULT);
do {
top_scratch1_reg = g->ops.top.read_top_scratch1_reg(g);
devinit_completed = ((g->ops.falcon.is_falcon_cpu_halted(

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -119,13 +119,8 @@ int tu104_bios_verify_devinit(struct gk20a *g)
struct nvgpu_timeout timeout;
u32 val;
u32 aon_secure_scratch_reg;
int err;
err = nvgpu_timeout_init(g, &timeout,
NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS);
do {
aon_secure_scratch_reg = g->ops.bios.get_aon_secure_scratch_reg(g, 0);

View File

@@ -1,7 +1,7 @@
/*
* GM20B MMU
*
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -50,11 +50,7 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
bus_bar1_block_target_vid_mem_f()) |
bus_bar1_block_mode_virtual_f() |
bus_bar1_block_ptr_f(ptr_v));
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
return err;
}
nvgpu_timeout_init_retry(g, &timeout, 1000);
do {
u32 val = gk20a_readl(g, bus_bind_status_r());

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -47,11 +47,7 @@ int gp10b_bus_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
bus_bar2_block_target_vid_mem_f()) |
bus_bar2_block_mode_virtual_f() |
bus_bar2_block_ptr_f(ptr_v));
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
return err;
}
nvgpu_timeout_init_retry(g, &timeout, 1000);
do {
u32 val = gk20a_readl(g, bus_bind_status_r());

View File

@@ -66,10 +66,7 @@ int bus_tu104_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x", ptr_v);
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
return err;
}
nvgpu_timeout_init_retry(g, &timeout, 1000);
nvgpu_func_writel(g, func_priv_bar2_block_r(),
nvgpu_aperture_mask(g, bar2_inst,

View File

@@ -1,7 +1,7 @@
/*
* GM20B CBC
*
* Copyright (c) 2019-2020 NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -180,8 +180,7 @@ int gm20b_cbc_ctrl(struct gk20a *g, enum nvgpu_cbc_op op,
ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
ltc * ltc_stride + slice * lts_stride;
nvgpu_timeout_init(g, &timeout, 2000,
NVGPU_TIMER_RETRY_TIMER);
nvgpu_timeout_init_retry(g, &timeout, 2000);
do {
val = gk20a_readl(g, ctrl1);
if ((val & hw_op) == 0U) {

View File

@@ -1,7 +1,7 @@
/*
* GP10B CBC
*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -180,8 +180,7 @@ int gp10b_cbc_ctrl(struct gk20a *g, enum nvgpu_cbc_op op,
ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
ltc * ltc_stride + slice * lts_stride;
nvgpu_timeout_init(g, &timeout, 2000,
NVGPU_TIMER_RETRY_TIMER);
nvgpu_timeout_init_retry(g, &timeout, 2000);
do {
val = gk20a_readl(g, ctrl1);
if ((val & hw_op) == 0U) {

View File

@@ -179,8 +179,7 @@ int tu104_cbc_ctrl(struct gk20a *g, enum nvgpu_cbc_op op,
ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
ltc * ltc_stride + slice * lts_stride;
nvgpu_timeout_init(g, &timeout, 2000,
NVGPU_TIMER_RETRY_TIMER);
nvgpu_timeout_init_retry(g, &timeout, 2000);
do {
val = nvgpu_readl(g, ctrl1);
if ((val & hw_op) == 0U) {

View File

@@ -268,15 +268,10 @@ void ga10b_fb_dump_vpr_info(struct gk20a *g)
static int ga10b_fb_vpr_mode_fetch_poll(struct gk20a *g, unsigned int poll_ms)
{
struct nvgpu_timeout timeout;
int err = 0;
u32 val = 0U;
u32 delay = POLL_DELAY_MIN_US;
err = nvgpu_timeout_init(g, &timeout, poll_ms, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, poll_ms);
do {
val = nvgpu_readl(g, fb_mmu_vpr_mode_r());

View File

@@ -100,12 +100,7 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
trace_gk20a_mm_tlb_invalidate(g->name);
#endif
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init(mmu fifo space) failed err=%d",
err);
goto out;
}
nvgpu_timeout_init_retry(g, &timeout, 1000);
do {
data = gk20a_readl(g, fb_mmu_ctrl_r());
@@ -121,12 +116,7 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
goto out;
}
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init(mmu invalidate) failed err=%d",
err);
goto out;
}
nvgpu_timeout_init_retry(g, &timeout, 1000);
gk20a_writel(g, fb_mmu_invalidate_pdb_r(),
fb_mmu_invalidate_pdb_addr_f(addr_lo) |
@@ -228,13 +218,8 @@ static int gm20b_fb_vpr_info_fetch_wait(struct gk20a *g,
unsigned int msec)
{
struct nvgpu_timeout timeout;
int err = 0;
err = nvgpu_timeout_init(g, &timeout, msec, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, msec);
do {
u32 val;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -170,14 +170,8 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
} else {
struct nvgpu_timeout timeout;
u32 delay = POLL_DELAY_MIN_US;
int err;
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
return;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
reg_val &= (~(fb_mmu_fault_buffer_size_enable_m()));
g->ops.fb.write_mmu_fault_buffer_size(g, index, reg_val);
@@ -695,12 +689,8 @@ int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
nvgpu_writel(g, fb_mmu_invalidate_r(), reg_val);
/* retry 200 times */
err = nvgpu_timeout_init(g, &timeout, 200U, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
return err;
}
nvgpu_timeout_init_retry(g, &timeout, 200U);
do {
reg_val = nvgpu_readl(g, fb_mmu_ctrl_r());
if (fb_mmu_ctrl_pri_fifo_empty_v(reg_val) !=

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -208,11 +208,7 @@ int tu104_fb_mmu_invalidate_replay(struct gk20a *g,
nvgpu_log_fn(g, " ");
/* retry 200 times */
err = nvgpu_timeout_init(g, &timeout, 200U, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
return err;
}
nvgpu_timeout_init_retry(g, &timeout, 200U);
nvgpu_mutex_acquire(&g->mm.tlb_lock);

View File

@@ -62,10 +62,7 @@ int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
addr_lo = u64_lo32(nvgpu_mem_get_addr(g, pdb) >> 12);
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
return err;
}
nvgpu_timeout_init_retry(g, &timeout, 1000);
nvgpu_mutex_acquire(&g->mm.tlb_lock);
@@ -170,12 +167,8 @@ static int tu104_fb_wait_mmu_bind(struct gk20a *g)
{
struct nvgpu_timeout timeout;
u32 val;
int err;
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
return err;
}
nvgpu_timeout_init_retry(g, &timeout, 1000);
do {
val = nvgpu_readl(g, fb_mmu_bind_r());

View File

@@ -214,7 +214,6 @@ int ga10b_fb_vab_dump_and_clear(struct gk20a *g, u64 *user_buf,
* poll NV_PFB_PRI_MMU_VIDMEM_ACCESS_BIT_DUMP_TRIGGER to be cleared
* clear what? buffer or access bits or buffer_put_ptr
*/
int err;
struct nvgpu_mem *vab_buf = &g->mm.vab.buffer;
u64 buffer_offset = 0ULL;
u64 req_buf_size = 0U;
@@ -267,11 +266,7 @@ int ga10b_fb_vab_dump_and_clear(struct gk20a *g, u64 *user_buf,
vab_dump_reg = nvgpu_readl(g, fb_mmu_vidmem_access_bit_dump_r());
nvgpu_log(g, gpu_dbg_vab, "vab_dump_reg 0x%x", vab_dump_reg);
err = nvgpu_timeout_init(g, &timeout, 1000U, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "Timeout init failed");
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, 1000U);
/* Check if trigger is cleared vab bits collection complete */
do {

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -105,11 +105,7 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
fifo_trigger_mmu_fault_enable_f(1U));
}
ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(g, "timeout init failed err=%d", ret);
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
/* Wait for MMU fault to trigger */
ret = -EBUSY;

View File

@@ -72,12 +72,7 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
u32 delay = POLL_DELAY_MIN_US;
int ret;
ret = nvgpu_timeout_init(g, &timeout, nvgpu_preempt_get_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(g, "timeout_init failed: %d", ret);
return ret;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_preempt_get_timeout(g));
ret = -EBUSY;
do {

View File

@@ -102,14 +102,7 @@ int gv11b_fifo_preempt_poll_pbdma(struct gk20a *g, u32 tsgid,
unsigned int loop_count = 0;
struct nvgpu_pbdma_status_info pbdma_status;
/* timeout in milli seconds */
ret = nvgpu_timeout_init(g, &timeout,
nvgpu_preempt_get_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(g, "timeout_init failed: %d", ret);
return ret;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_preempt_get_timeout(g));
/* Default return value */
ret = -EBUSY;
@@ -231,14 +224,7 @@ static int gv11b_fifo_preempt_poll_eng(struct gk20a *g, u32 id,
u32 eng_intr_pending;
struct nvgpu_engine_status_info engine_status;
/* timeout in milli seconds */
ret = nvgpu_timeout_init(g, &timeout,
nvgpu_preempt_get_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(g, "timeout_init failed: %d", ret);
return ret;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_preempt_get_timeout(g));
/* Default return value */
ret = -EBUSY;

View File

@@ -89,11 +89,7 @@ int ga10b_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
runlist = g->fifo.runlists[runlist_id];
ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
return ret;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
ret = -ETIMEDOUT;
do {

View File

@@ -71,12 +71,7 @@ int gk20a_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
u32 delay = POLL_DELAY_MIN_US;
int ret = 0;
ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", ret);
return ret;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
ret = -ETIMEDOUT;
do {

View File

@@ -70,11 +70,7 @@ int tu104_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
u32 delay = POLL_DELAY_MIN_US;
int ret;
ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
return ret;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
ret = -ETIMEDOUT;
do {

View File

@@ -190,20 +190,14 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
int gm20b_gr_falcon_wait_mem_scrubbing(struct gk20a *g)
{
struct nvgpu_timeout timeout;
int err;
bool fecs_scrubbing;
bool gpccs_scrubbing;
nvgpu_log_fn(g, " ");
err = nvgpu_timeout_init(g, &timeout,
nvgpu_timeout_init_retry(g, &timeout,
CTXSW_MEM_SCRUBBING_TIMEOUT_MAX_US /
CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT_US,
NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "ctxsw mem scrub timeout_init failed: %d", err);
return err;
}
CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT_US);
do {
fecs_scrubbing = (nvgpu_readl(g, gr_fecs_dmactl_r()) &
@@ -433,12 +427,7 @@ static int gm20b_gr_falcon_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
delay = POLL_DELAY_MIN_US;
}
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "ctxsw wait ucode timeout_init failed: %d", err);
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
while (check == WAIT_UCODE_LOOP) {
if (nvgpu_timeout_expired(&timeout) != 0) {

View File

@@ -1683,8 +1683,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
"GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm);
nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
/* wait for the sm to lock down */
do {

View File

@@ -726,8 +726,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
gr_ctx = tsg->gr_ctx;
nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
do {
if (!nvgpu_gr_ctx_get_cilp_preempt_pending(gr_ctx)) {
break;

View File

@@ -1256,7 +1256,6 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
u32 dbgr_status0 = 0;
u32 warp_esr, global_esr;
struct nvgpu_timeout timeout;
int err;
u32 offset = nvgpu_gr_gpc_offset(g, gpc) +
nvgpu_gr_tpc_offset(g, tpc) +
nvgpu_gr_sm_offset(g, sm);
@@ -1264,12 +1263,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
"GPC%d TPC%d: locking down SM%d", gpc, tpc, sm);
err = nvgpu_timeout_init(g, &timeout, g->poll_timeout_default,
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "timeout_init failed: %d", err);
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, g->poll_timeout_default);
/* wait for the sm to lock down */
do {

View File

@@ -328,17 +328,12 @@ void ga10b_gr_init_commit_global_timeslice(struct gk20a *g)
int ga10b_gr_init_wait_idle(struct gk20a *g)
{
u32 delay = POLL_DELAY_MIN_US;
int err = 0;
bool gr_busy;
struct nvgpu_timeout timeout;
nvgpu_log(g, gpu_dbg_verbose | gpu_dbg_gr, " ");
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
do {
/*
@@ -468,16 +463,10 @@ int ga10b_gr_init_wait_empty(struct gk20a *g)
u32 gr_status;
u32 activity0, activity1, activity4;
struct nvgpu_timeout timeout;
int err;
nvgpu_log_fn(g, " ");
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "timeout_init failed: %d", err);
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
do {
gr_status = nvgpu_readl(g, gr_status_r());

View File

@@ -180,7 +180,6 @@ int gm20b_gr_init_wait_idle(struct gk20a *g)
{
u32 delay = POLL_DELAY_MIN_US;
u32 gr_engine_id;
int err = 0;
bool ctxsw_active;
bool gr_busy;
bool ctx_status_invalid;
@@ -191,11 +190,7 @@ int gm20b_gr_init_wait_idle(struct gk20a *g)
gr_engine_id = nvgpu_engine_get_gr_id(g);
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
do {
/*
@@ -236,7 +231,6 @@ int gm20b_gr_init_wait_fe_idle(struct gk20a *g)
u32 val;
u32 delay = POLL_DELAY_MIN_US;
struct nvgpu_timeout timeout;
int err = 0;
#ifdef CONFIG_NVGPU_SIM
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
@@ -246,11 +240,7 @@ int gm20b_gr_init_wait_fe_idle(struct gk20a *g)
nvgpu_log(g, gpu_dbg_verbose, " ");
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
do {
val = nvgpu_readl(g, gr_status_r());
@@ -289,13 +279,9 @@ int gm20b_gr_init_fe_pwr_mode_force_on(struct gk20a *g, bool force_on)
gr_fe_pwr_mode_mode_auto_f();
}
ret = nvgpu_timeout_init(g, &timeout,
nvgpu_timeout_init_retry(g, &timeout,
FE_PWR_MODE_TIMEOUT_MAX_US /
FE_PWR_MODE_TIMEOUT_DEFAULT_US,
NVGPU_TIMER_RETRY_TIMER);
if (ret != 0) {
return ret;
}
FE_PWR_MODE_TIMEOUT_DEFAULT_US);
nvgpu_writel(g, gr_fe_pwr_mode_r(), reg_val);

View File

@@ -65,16 +65,10 @@ int gp10b_gr_init_wait_empty(struct gk20a *g)
u32 gr_status;
u32 activity0, activity1, activity2, activity4;
struct nvgpu_timeout timeout;
int err;
nvgpu_log_fn(g, " ");
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "timeout_init failed: %d", err);
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
do {
/* fmodel: host gets fifo_engine_status(gr) from gr

View File

@@ -74,19 +74,13 @@ static int gr_gv11b_ecc_scrub_is_done(struct gk20a *g,
u32 val;
u32 gpc, tpc;
u32 gpc_offset, tpc_offset;
int err;
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
GPU_LIT_TPC_IN_GPC_STRIDE);
err = nvgpu_timeout_init(g, &timeout,
nvgpu_timeout_init_retry(g, &timeout,
(GR_ECC_SCRUBBING_TIMEOUT_MAX_US /
GR_ECC_SCRUBBING_TIMEOUT_DEFAULT_US),
NVGPU_TIMER_RETRY_TIMER);
if (err != 0) {
nvgpu_err(g, "timeout_init failed: %d", err);
return err;
}
GR_ECC_SCRUBBING_TIMEOUT_DEFAULT_US));
for (gpc = 0; gpc < nvgpu_gr_config_get_gpc_count(gr_config); gpc++) {
gpc_offset = nvgpu_safe_mult_u32(gpc_stride, gpc);

View File

@@ -66,11 +66,7 @@ static int gm20b_ltc_wait_for_clean(struct gk20a *g)
*
* So 5ms timeout here should be more than sufficient.
*/
err = nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
return -ETIMEDOUT;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, 5);
do {
u32 cmgmt1 = nvgpu_safe_add_u32(
@@ -100,11 +96,7 @@ static int gm20b_ltc_wait_for_invalidate(struct gk20a *g)
u32 op_pending;
/* Again, 5ms. */
err = nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
return -ETIMEDOUT;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, 5);
do {
u32 cmgmt0 = nvgpu_safe_add_u32(

View File

@@ -75,14 +75,8 @@ static int ga10b_mc_poll_device_enable(struct gk20a *g, u32 reg_idx,
u32 reg_val;
u32 delay = POLL_DELAY_MIN_US;
struct nvgpu_timeout timeout;
int err;
err = nvgpu_timeout_init(g, &timeout, MC_ENGINE_RESET_DELAY_US,
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "Timeout init failed");
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, MC_ENGINE_RESET_DELAY_US);
reg_val = nvgpu_readl(g, mc_device_enable_r(reg_idx));

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -50,8 +50,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_CBC_CLEAN);
}
nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
NVGPU_TIMER_RETRY_TIMER) == 0);
nvgpu_timeout_init_retry(g, &timeout, retries);
nvgpu_mutex_acquire(&mm->l2_op_lock);

View File

@@ -53,8 +53,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_FB);
}
nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
NVGPU_TIMER_RETRY_TIMER) == 0);
nvgpu_timeout_init_retry(g, &timeout, retries);
nvgpu_mutex_acquire(&mm->l2_op_lock);
@@ -118,8 +117,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_INV);
}
nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
NVGPU_TIMER_RETRY_TIMER) == 0);
nvgpu_timeout_init_retry(g, &timeout, retries);
/* Invalidate any clean lines from the L2 so subsequent reads go to
DRAM. Dirty lines are not affected by this operation. */
@@ -182,8 +180,7 @@ int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_FLUSH);
}
nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
NVGPU_TIMER_RETRY_TIMER) == 0);
nvgpu_timeout_init_retry(g, &timeout, retries);
nvgpu_mutex_acquire(&mm->l2_op_lock);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -241,12 +241,8 @@ int gv100_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask)
/* Poll for links to go up */
links_off = (u32) link_mask;
err = nvgpu_timeout_init(g, &timeout,
NVLINK_PLL_ON_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "PLL ON timeout init failed");
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, NVLINK_PLL_ON_TIMEOUT_MS);
do {
for_each_set_bit(bit, &link_mask, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
@@ -303,13 +299,8 @@ static int gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask)
for_each_set_bit(bit, &mask, NVLINK_MAX_LINKS_SW) {
link_id = (u32)bit;
/* Timeout from HW specs */
ret = nvgpu_timeout_init(g, &timeout,
8*NVLINK_SUBLINK_TIMEOUT_MS,
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(g, "Timeout threshold init failed");
return ret;
}
nvgpu_timeout_init_cpu_timer(g, &timeout,
8*NVLINK_SUBLINK_TIMEOUT_MS);
reg = DLPL_REG_RD32(g, link_id, nvl_br0_cfg_cal_r());
reg = set_field(reg, nvl_br0_cfg_cal_rxcal_m(),
nvl_br0_cfg_cal_rxcal_on_f());
@@ -496,14 +487,9 @@ static int gv100_nvlink_link_sublink_check_change(struct gk20a *g, u32 link_id)
{
struct nvgpu_timeout timeout;
u32 reg;
int err = 0;
err = nvgpu_timeout_init(g, &timeout,
NVLINK_SUBLINK_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "Sublink mode change timeout init failed");
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, NVLINK_SUBLINK_TIMEOUT_MS);
/* Poll for sublink status */
do {
reg = DLPL_REG_RD32(g, link_id, nvl_sublink_change_r());
@@ -524,7 +510,7 @@ static int gv100_nvlink_link_sublink_check_change(struct gk20a *g, u32 link_id)
if (nvgpu_timeout_peek_expired(&timeout)) {
return -ETIMEDOUT;
}
return err;
return 0;
}
int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id,

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -48,12 +48,8 @@ int tu104_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask)
return ret;
}
ret = nvgpu_timeout_init(g, &timeout,
NV_NVLINK_REG_POLL_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(g, "Error during timeout init");
return ret;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS);
do {
reg = DLPL_REG_RD32(g, link_id, nvl_clk_status_r());
if (nvl_clk_status_txclk_sts_v(reg) ==
@@ -78,14 +74,8 @@ u32 tu104_nvlink_link_get_tx_sublink_state(struct gk20a *g, u32 link_id)
{
u32 reg;
struct nvgpu_timeout timeout;
int err = 0;
err = nvgpu_timeout_init(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS,
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "Failed to init timeout: %d", err);
goto result;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS);
/* Poll till substate value becomes STABLE */
do {
@@ -102,7 +92,6 @@ u32 tu104_nvlink_link_get_tx_sublink_state(struct gk20a *g, u32 link_id)
nvl_sl0_slsm_status_tx_primary_state_v(reg),
nvl_sl0_slsm_status_tx_substate_v(reg));
result:
return nvl_sl0_slsm_status_tx_primary_state_unknown_v();
}
@@ -110,14 +99,8 @@ u32 tu104_nvlink_link_get_rx_sublink_state(struct gk20a *g, u32 link_id)
{
u32 reg;
struct nvgpu_timeout timeout;
int err = 0;
err = nvgpu_timeout_init(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS,
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "Failed to init timeout: %d", err);
goto result;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS);
/* Poll till substate value becomes STABLE */
do {
@@ -134,7 +117,6 @@ u32 tu104_nvlink_link_get_rx_sublink_state(struct gk20a *g, u32 link_id)
nvl_sl1_slsm_status_rx_primary_state_v(reg),
nvl_sl1_slsm_status_rx_substate_v(reg));
result:
return nvl_sl1_slsm_status_rx_primary_state_unknown_v();
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -104,14 +104,8 @@ static int gv100_nvlink_minion_command_complete(struct gk20a *g, u32 link_id)
u32 reg;
struct nvgpu_timeout timeout;
u32 delay = POLL_DELAY_MIN_US;
int err = 0;
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "Minion cmd complete timeout init failed");
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
do {
reg = MINION_REG_RD32(g, minion_nvlink_dl_cmd_r(link_id));
@@ -145,7 +139,7 @@ static int gv100_nvlink_minion_command_complete(struct gk20a *g, u32 link_id)
}
nvgpu_log(g, gpu_dbg_nvlink, "minion cmd Complete");
return err;
return 0;
}
u32 gv100_nvlink_minion_get_dlcmd_ordinal(struct gk20a *g,

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -58,12 +58,7 @@ int tu104_nvlink_rxdet(struct gk20a *g, u32 link_id)
return ret;
}
ret = nvgpu_timeout_init(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS,
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
nvgpu_err(g, "Error during timeout init");
return ret;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS);
do {
reg = DLPL_REG_RD32(g, link_id, nvl_sl0_link_rxdet_status_r());

View File

@@ -665,13 +665,8 @@ int ga10b_perf_wait_for_idle_pma(struct gk20a *g)
u32 status, rbufempty_status;
u32 timeout_ms = 1;
u32 reg_val;
int err;
err = nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "failed to init timeout");
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
do {
reg_val = nvgpu_readl(g, perf_pmasys_enginestatus_r());

View File

@@ -535,13 +535,8 @@ static int poll_for_pmm_router_idle(struct gk20a *g, u32 offset, u32 timeout_ms)
struct nvgpu_timeout timeout;
u32 reg_val;
u32 status;
int err;
err = nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "failed to init timeout");
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
do {
reg_val = nvgpu_readl(g, offset);
@@ -624,13 +619,8 @@ int gv11b_perf_wait_for_idle_pma(struct gk20a *g)
u32 status, rbufempty_status;
u32 timeout_ms = 1;
u32 reg_val;
int err;
err = nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
if (err != 0) {
nvgpu_err(g, "failed to init timeout");
return err;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
do {
reg_val = nvgpu_readl(g, perf_pmasys_enginestatus_r());

View File

@@ -242,11 +242,7 @@ static int do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
gk20a_writel(g, xp_dl_mgr_r(0), dl_mgr);
xv_sc_dbg(g, DL_SAFE_MODE, " Done!");
if (nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
NVGPU_TIMER_CPU_TIMER) != 0) {
nvgpu_err(g, "failed to init timeout");
goto done;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, GPU_XVE_TIMEOUT_MS);
xv_sc_dbg(g, CHECK_LINK, "Checking for link idle...");
do {
@@ -324,11 +320,8 @@ static int do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
xv_sc_dbg(g, EXEC_CHANGE, "Running link speed change...");
if (nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
NVGPU_TIMER_CPU_TIMER) != 0) {
nvgpu_err(g, "failed to init timeout");
goto done;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, GPU_XVE_TIMEOUT_MS);
do {
gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config);
if (pl_link_config ==
@@ -360,11 +353,7 @@ static int do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
* Read NV_XP_PL_LINK_CONFIG until the link has swapped to
* the target speed.
*/
if (nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
NVGPU_TIMER_CPU_TIMER) != 0) {
nvgpu_err(g, "failed to init timeout");
goto done;
}
nvgpu_timeout_init_cpu_timer(g, &timeout, GPU_XVE_TIMEOUT_MS);
do {
pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
if (pl_link_config != 0xfffffffU &&

View File

@@ -149,9 +149,36 @@ struct nvgpu_timeout {
*
* @retval -EINVAL invalid input parameter.
*/
int nvgpu_timeout_init(struct gk20a *g, struct nvgpu_timeout *timeout,
int nvgpu_timeout_init_flags(struct gk20a *g, struct nvgpu_timeout *timeout,
u32 duration, unsigned long flags);
/**
* @brief Initialize a timeout.
*
* Init a cpu clock based timeout. See nvgpu_timeout_init_flags() and
* NVGPU_TIMER_CPU_TIMER for full explanation.
*
* @param g [in] GPU driver structure.
* @param timeout [in] Timeout object to initialize.
* @param duration [in] Timeout duration in milliseconds.
*/
void nvgpu_timeout_init_cpu_timer(struct gk20a *g, struct nvgpu_timeout *timeout,
u32 duration_ms);
/**
* @brief Initialize a timeout.
*
* Init a retry based timeout. See nvgpu_timeout_init_flags() and
* NVGPU_TIMER_RETRY_TIMER for full explanation.
*
* @param g [in] GPU driver structure.
* @param timeout [in] Timeout object to initialize.
* @param duration [in] Timeout duration in number of retries.
*/
void nvgpu_timeout_init_retry(struct gk20a *g, struct nvgpu_timeout *timeout,
u32 duration_count);
/**
* @brief Check the timeout status.
*
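
As an editorial illustration of the two helpers declared above, a minimal,
hypothetical poll loop might look as follows (device_is_idle() and the delay
value are placeholders; the timeout calls match the declarations):

static int wait_for_device_idle(struct gk20a *g)
{
        struct nvgpu_timeout to;

        /* give up after 1000 polling attempts rather than after wall-clock time */
        nvgpu_timeout_init_retry(g, &to, 1000U);

        do {
                if (device_is_idle(g)) {        /* hypothetical readiness check */
                        return 0;
                }
                nvgpu_udelay(10);               /* small delay between polls */
        } while (nvgpu_timeout_expired(&to) == 0);

        return -ETIMEDOUT;
}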

View File

@@ -936,8 +936,7 @@ __acquires(&cde_app->mutex)
struct gk20a_cde_ctx *cde_ctx = NULL;
struct nvgpu_timeout timeout;
nvgpu_timeout_init(g, &timeout, MAX_CTX_RETRY_TIME,
NVGPU_TIMER_CPU_TIMER);
nvgpu_timeout_init_cpu_timer(g, &timeout, MAX_CTX_RETRY_TIME);
do {
cde_ctx = gk20a_cde_do_get_context(l);

View File

@@ -774,8 +774,7 @@ int gk20a_block_new_jobs_and_idle(struct gk20a *g)
nvgpu_mutex_acquire(&platform->railgate_lock);
nvgpu_timeout_init(g, &timeout, GK20A_WAIT_FOR_IDLE_MS,
NVGPU_TIMER_CPU_TIMER);
nvgpu_timeout_init_cpu_timer(g, &timeout, GK20A_WAIT_FOR_IDLE_MS);
/* check and wait until GPU is idle (with a timeout) */
do {
@@ -1395,8 +1394,7 @@ static int gk20a_pm_suspend(struct device *dev)
return ret;
}
nvgpu_timeout_init(g, &timeout, GK20A_WAIT_FOR_IDLE_MS,
NVGPU_TIMER_CPU_TIMER);
nvgpu_timeout_init_cpu_timer(g, &timeout, GK20A_WAIT_FOR_IDLE_MS);
/*
* Hold back deterministic submits and changes to deterministic
* channels - this must be outside the power busy locks.

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,24 @@ static int nvgpu_timeout_is_pre_silicon(struct nvgpu_timeout *timeout)
return !nvgpu_platform_is_silicon(timeout->g);
}
void nvgpu_timeout_init_cpu_timer(struct gk20a *g, struct nvgpu_timeout *timeout,
u32 duration_ms)
{
int err = nvgpu_timeout_init_flags(g, timeout, duration_ms,
NVGPU_TIMER_CPU_TIMER);
nvgpu_assert(err == 0);
}
void nvgpu_timeout_init_retry(struct gk20a *g, struct nvgpu_timeout *timeout,
u32 duration_count)
{
int err = nvgpu_timeout_init_flags(g, timeout, duration_count,
NVGPU_TIMER_RETRY_TIMER);
nvgpu_assert(err == 0);
}
/**
* nvgpu_timeout_init - Init timer.
*
@@ -55,7 +73,7 @@ static int nvgpu_timeout_is_pre_silicon(struct nvgpu_timeout *timeout)
* If neither %NVGPU_TIMER_CPU_TIMER or %NVGPU_TIMER_RETRY_TIMER is passed then
* a CPU timer is used by default.
*/
int nvgpu_timeout_init(struct gk20a *g, struct nvgpu_timeout *timeout,
int nvgpu_timeout_init_flags(struct gk20a *g, struct nvgpu_timeout *timeout,
u32 duration, unsigned long flags)
{
if (flags & ~NVGPU_TIMER_FLAG_MASK)

View File

@@ -122,7 +122,25 @@ static bool time_after(s64 a, s64 b)
return (nvgpu_safe_sub_s64(a, b) > 0);
}
int nvgpu_timeout_init(struct gk20a *g, struct nvgpu_timeout *timeout,
void nvgpu_timeout_init_cpu_timer(struct gk20a *g, struct nvgpu_timeout *timeout,
u32 duration_ms)
{
int err = nvgpu_timeout_init_flags(g, timeout, duration_ms,
NVGPU_TIMER_CPU_TIMER);
nvgpu_assert(err == 0);
}
void nvgpu_timeout_init_retry(struct gk20a *g, struct nvgpu_timeout *timeout,
u32 duration_count)
{
int err = nvgpu_timeout_init_flags(g, timeout, duration_count,
NVGPU_TIMER_RETRY_TIMER);
nvgpu_assert(err == 0);
}
int nvgpu_timeout_init_flags(struct gk20a *g, struct nvgpu_timeout *timeout,
u32 duration, unsigned long flags)
{
s64 duration_ns;

View File

@@ -682,7 +682,9 @@ nvgpu_rwsem_down_write
nvgpu_rwsem_up_read
nvgpu_rwsem_up_write
nvgpu_timeout_expired_fault_injection
nvgpu_timeout_init
nvgpu_timeout_init_cpu_timer
nvgpu_timeout_init_flags
nvgpu_timeout_init_retry
nvgpu_timeout_peek_expired
nvgpu_timers_get_fault_injection
nvgpu_tsg_abort

View File

@@ -699,7 +699,9 @@ nvgpu_rwsem_down_write
nvgpu_rwsem_up_read
nvgpu_rwsem_up_write
nvgpu_timeout_expired_fault_injection
nvgpu_timeout_init
nvgpu_timeout_init_cpu_timer
nvgpu_timeout_init_flags
nvgpu_timeout_init_retry
nvgpu_timeout_peek_expired
nvgpu_timers_get_fault_injection
nvgpu_tsg_store_sm_error_state

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -64,7 +64,7 @@ int test_timer_init(struct unit_module *m,
flags = NVGPU_TIMER_CPU_TIMER;
}
ret = nvgpu_timeout_init(g, &test_timeout,
ret = nvgpu_timeout_init_flags(g, &test_timeout,
duration,
flags);
@@ -92,7 +92,7 @@ int test_timer_init_err(struct unit_module *m,
memset(&test_timeout, 0, sizeof(struct nvgpu_timeout));
/* nvgpu_tiemout_init accepts only BIT(0), BIT(8), and BIT(9) as
* valid flag bits. So ret should be EINVAL */
ret = nvgpu_timeout_init(g, &test_timeout, 10, (1 << i));
ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, (1 << i));
if ((i == 0) || (i == 8) || (i == 9)) {
if (ret != 0) {
@@ -110,55 +110,55 @@ int test_timer_init_err(struct unit_module *m,
}
/* BIT(0), BIT(8) and BIT(9) set. Return value should be 0 */
ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x301);
ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x301);
if (ret != 0) {
unit_return_fail(m,"Timer init failed with flag 0x301\n");
}
/* BIT(8) and BIT(9) set. Return value should be 0 */
ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x300);
ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x300);
if (ret != 0) {
unit_return_fail(m,"Timer init failed with flag 0x300\n");
}
/* BIT(0) and BIT(8) set. Return value should be 0 */
ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x101);
ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x101);
if (ret != 0) {
unit_return_fail(m,"Timer init failed with flag 0x101\n");
}
/* BIT(0) and BIT(9) set. Return value should be 0 */
ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x201);
ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x201);
if (ret != 0) {
unit_return_fail(m,"Timer init failed with flag 0x201\n");
}
/* BIT(0), BIT(7) and BIT(9) set. Return value should be -EINVAL */
ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x281);
ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x281);
if (ret != -EINVAL) {
unit_return_fail(m,"Timer init failed with flag 0x281\n");
}
/* BIT(5), BIT(7) and BIT(9) set. Return value should be -EINVAL */
ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x2A0);
ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x2A0);
if (ret != -EINVAL) {
unit_return_fail(m,"Timer init failed with flag 0x2A0\n");
}
/* BIT(1), BIT(2) and BIT(3) set. Return value should be -EINVAL */
ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x00E);
ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x00E);
if (ret != -EINVAL) {
unit_return_fail(m,"Timer init failed with flag 0x00E\n");
}
/* BIT(1) to BIT(7) set. Return value should be -EINVAL */
ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x07E);
ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x07E);
if (ret != -EINVAL) {
unit_return_fail(m,"Timer init failed with flag 0x07E\n");
}
/* All bits set. Return value should be -EINVAL */
ret = nvgpu_timeout_init(g, &test_timeout, 10, 0xFFFFFFFFFFFFFFFF);
ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0xFFFFFFFFFFFFFFFF);
if (ret != -EINVAL) {
unit_return_fail(m,"Timer init failed with flag all 1s\n");
}
@@ -169,17 +169,9 @@ int test_timer_init_err(struct unit_module *m,
int test_timer_counter(struct unit_module *m,
struct gk20a *g, void *args)
{
int ret;
memset(&test_timeout, 0, sizeof(struct nvgpu_timeout));
ret = nvgpu_timeout_init(g, &test_timeout,
TEST_TIMER_COUNT,
NVGPU_TIMER_RETRY_TIMER);
if (ret != 0) {
unit_return_fail(m, "Timer init failed %d\n", ret);
}
nvgpu_timeout_init_retry(g, &test_timeout, TEST_TIMER_COUNT);
do {
usleep(1);
@@ -200,13 +192,7 @@ int test_timer_duration(struct unit_module *m,
memset(&test_timeout, 0, sizeof(struct nvgpu_timeout));
ret = nvgpu_timeout_init(g, &test_timeout,
TEST_TIMER_DURATION,
NVGPU_TIMER_CPU_TIMER);
if (ret != 0) {
unit_return_fail(m, "Timer init failed %d\n", ret);
}
nvgpu_timeout_init_cpu_timer(g, &test_timeout, TEST_TIMER_DURATION);
/*
* Timer should not be expired.
@@ -251,7 +237,7 @@ int test_timer_fault_injection(struct unit_module *m,
memset(&test_timeout, 0, sizeof(struct nvgpu_timeout));
ret = nvgpu_timeout_init(g, &test_timeout,
ret = nvgpu_timeout_init_flags(g, &test_timeout,
TEST_TIMER_DURATION,
NVGPU_TIMER_CPU_TIMER);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,7 @@
*
* Test Type: Feature
*
* Targets: nvgpu_timeout_init
* Targets: nvgpu_timeout_init_flags
*
* Inputs:
* 1) The type of timer to be tested is passed as an argument to the test.
@@ -69,7 +69,7 @@ int test_timer_init(struct unit_module *m,
*
* Test Type: Boundary values
*
* Targets: nvgpu_timeout_init
* Targets: nvgpu_timeout_init_flags
*
* Inputs:
* 1) Global nvgpu_timeout structure instance.
@@ -98,7 +98,7 @@ int test_timer_init_err(struct unit_module *m,
*
* Test Type: Feature
*
* Targets: nvgpu_timeout_init, nvgpu_timeout_expired,
* Targets: nvgpu_timeout_init_retry, nvgpu_timeout_expired,
* nvgpu_timeout_peek_expired
*
* Input:
@@ -131,7 +131,7 @@ int test_timer_counter(struct unit_module *m,
*
* Test Type: Feature
*
* Targets: nvgpu_timeout_init, nvgpu_timeout_expired,
* Targets: nvgpu_timeout_init_cpu_timer, nvgpu_timeout_expired,
* nvgpu_timeout_peek_expired
*
* Input: