gpu: nvgpu: simplify nvgpu_timeout_init
nvgpu_timeout_init() returns an error code only when the flags parameter
is invalid. There are very few possible values for flags, so extract the
two most common cases - a CPU-clock-based and a retry-based timeout - to
functions that cannot fail and thus return nothing. Adjust all callers to
use those, simplifying error handling quite a bit.

Change-Id: I985fe7fa988ebbae25601d15cf57fd48eda0c677
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2613833
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit f4ec400d5f
parent 9b3f3ea4be
committed by mobile promotions
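Before the diff, a sketch of what the refactor boils down to. This is a minimal illustration, not the actual nvgpu implementation: the struct layout, the flag values, the validity mask, and the assert-based check below are all stand-ins invented for the example. The observation the commit relies on is that nvgpu_timeout_init() can fail only when its flags argument is invalid, so a wrapper that hard-codes a known-valid flag cannot fail, can return void, and lets every caller drop its error-handling branch.

```c
#include <assert.h>

/* Stand-in types and flag values; the real definitions live in nvgpu headers. */
struct gk20a;				/* opaque GPU device context */
struct nvgpu_timeout {
	unsigned int duration;
	unsigned long flags;
};

#define NVGPU_TIMER_CPU_TIMER	0x0UL
#define NVGPU_TIMER_RETRY_TIMER	0x1UL
#define NVGPU_TIMER_FLAG_MASK	0x1UL	/* hypothetical valid-flags mask */

/* The original, fallible initializer: fails only on invalid flags. */
static int nvgpu_timeout_init(struct gk20a *g, struct nvgpu_timeout *to,
			      unsigned int duration, unsigned long flags)
{
	(void)g;
	if ((flags & ~NVGPU_TIMER_FLAG_MASK) != 0UL) {
		return -1;	/* stands in for -EINVAL */
	}
	to->duration = duration;
	to->flags = flags;
	return 0;
}

/* Wall-clock timeout in milliseconds; infallible, so it returns void. */
static void nvgpu_timeout_init_cpu_timer(struct gk20a *g,
					 struct nvgpu_timeout *to,
					 unsigned int duration_ms)
{
	int err = nvgpu_timeout_init(g, to, duration_ms,
				     NVGPU_TIMER_CPU_TIMER);

	(void)err;
	assert(err == 0);	/* cannot trigger: the flag is always valid */
}

/* Retry-count timeout; infallible for the same reason. */
static void nvgpu_timeout_init_retry(struct gk20a *g,
				     struct nvgpu_timeout *to,
				     unsigned int max_attempts)
{
	int err = nvgpu_timeout_init(g, to, max_attempts,
				     NVGPU_TIMER_RETRY_TIMER);

	(void)err;
	assert(err == 0);
}
```

With that, each call site in the diff below collapses from an init-plus-error-check block into a single void call; that is the mechanical change repeated across all of the files that follow.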
@@ -93,7 +93,7 @@ int nvgpu_falcon_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout)
|
|||||||
{
|
{
|
||||||
struct nvgpu_timeout to;
|
struct nvgpu_timeout to;
|
||||||
struct gk20a *g;
|
struct gk20a *g;
|
||||||
int status;
|
int status = 0;
|
||||||
|
|
||||||
if (!is_falcon_valid(flcn)) {
|
if (!is_falcon_valid(flcn)) {
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
@@ -101,10 +101,7 @@ int nvgpu_falcon_wait_for_halt(struct nvgpu_falcon *flcn, unsigned int timeout)
|
|||||||
|
|
||||||
g = flcn->g;
|
g = flcn->g;
|
||||||
|
|
||||||
status = nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &to, timeout);
|
||||||
if (status != 0) {
|
|
||||||
return status;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if (g->ops.falcon.is_falcon_cpu_halted(flcn)) {
|
if (g->ops.falcon.is_falcon_cpu_halted(flcn)) {
|
||||||
@@ -125,7 +122,6 @@ int nvgpu_falcon_wait_idle(struct nvgpu_falcon *flcn)
|
|||||||
{
|
{
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
struct gk20a *g;
|
struct gk20a *g;
|
||||||
int status;
|
|
||||||
|
|
||||||
if (!is_falcon_valid(flcn)) {
|
if (!is_falcon_valid(flcn)) {
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
@@ -133,10 +129,7 @@ int nvgpu_falcon_wait_idle(struct nvgpu_falcon *flcn)
|
|||||||
|
|
||||||
g = flcn->g;
|
g = flcn->g;
|
||||||
|
|
||||||
status = nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_RETRY_TIMER);
|
nvgpu_timeout_init_retry(g, &timeout, 2000);
|
||||||
if (status != 0) {
|
|
||||||
return status;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* wait for falcon idle */
|
/* wait for falcon idle */
|
||||||
do {
|
do {
|
||||||
@@ -166,7 +159,7 @@ int nvgpu_falcon_mem_scrub_wait(struct nvgpu_falcon *flcn)
|
|||||||
const u32 mem_scrubbing_max_timeout = 1000U;
|
const u32 mem_scrubbing_max_timeout = 1000U;
|
||||||
const u32 mem_scrubbing_default_timeout = 10U;
|
const u32 mem_scrubbing_default_timeout = 10U;
|
||||||
struct gk20a *g;
|
struct gk20a *g;
|
||||||
int status;
|
int status = 0;
|
||||||
|
|
||||||
if (!is_falcon_valid(flcn)) {
|
if (!is_falcon_valid(flcn)) {
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
@@ -175,13 +168,9 @@ int nvgpu_falcon_mem_scrub_wait(struct nvgpu_falcon *flcn)
|
|||||||
g = flcn->g;
|
g = flcn->g;
|
||||||
|
|
||||||
/* check IMEM/DMEM scrubbing complete status */
|
/* check IMEM/DMEM scrubbing complete status */
|
||||||
status = nvgpu_timeout_init(g, &timeout,
|
nvgpu_timeout_init_retry(g, &timeout,
|
||||||
mem_scrubbing_max_timeout /
|
mem_scrubbing_max_timeout /
|
||||||
mem_scrubbing_default_timeout,
|
mem_scrubbing_default_timeout);
|
||||||
NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
if (status != 0) {
|
|
||||||
return status;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if (g->ops.falcon.is_falcon_scrubbing_done(flcn)) {
|
if (g->ops.falcon.is_falcon_scrubbing_done(flcn)) {
|
||||||
@@ -670,7 +659,7 @@ int nvgpu_falcon_clear_halt_intr_status(struct nvgpu_falcon *flcn,
|
|||||||
{
|
{
|
||||||
struct nvgpu_timeout to;
|
struct nvgpu_timeout to;
|
||||||
struct gk20a *g;
|
struct gk20a *g;
|
||||||
int status;
|
int status = 0;
|
||||||
|
|
||||||
if (!is_falcon_valid(flcn)) {
|
if (!is_falcon_valid(flcn)) {
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
@@ -678,10 +667,7 @@ int nvgpu_falcon_clear_halt_intr_status(struct nvgpu_falcon *flcn,
|
|||||||
|
|
||||||
g = flcn->g;
|
g = flcn->g;
|
||||||
|
|
||||||
status = nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &to, timeout);
|
||||||
if (status != 0) {
|
|
||||||
return status;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if (g->ops.falcon.clear_halt_interrupt_status(flcn)) {
|
if (g->ops.falcon.clear_halt_interrupt_status(flcn)) {
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -139,15 +139,11 @@ void nvgpu_channel_worker_poll_init(struct nvgpu_worker *worker)
|
|||||||
{
|
{
|
||||||
struct nvgpu_channel_worker *ch_worker =
|
struct nvgpu_channel_worker *ch_worker =
|
||||||
nvgpu_channel_worker_from_worker(worker);
|
nvgpu_channel_worker_from_worker(worker);
|
||||||
int ret;
|
|
||||||
|
|
||||||
ch_worker->watchdog_interval = 100U;
|
ch_worker->watchdog_interval = 100U;
|
||||||
|
|
||||||
ret = nvgpu_timeout_init(worker->g, &ch_worker->timeout,
|
nvgpu_timeout_init_cpu_timer(worker->g, &ch_worker->timeout,
|
||||||
ch_worker->watchdog_interval, NVGPU_TIMER_CPU_TIMER);
|
ch_worker->watchdog_interval);
|
||||||
if (ret != 0) {
|
|
||||||
nvgpu_err(worker->g, "timeout_init failed: %d", ret);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -176,16 +172,11 @@ void nvgpu_channel_worker_poll_wakeup_post_process_item(
|
|||||||
|
|
||||||
struct nvgpu_channel_worker *ch_worker =
|
struct nvgpu_channel_worker *ch_worker =
|
||||||
nvgpu_channel_worker_from_worker(worker);
|
nvgpu_channel_worker_from_worker(worker);
|
||||||
int ret;
|
|
||||||
|
|
||||||
if (nvgpu_timeout_peek_expired(&ch_worker->timeout)) {
|
if (nvgpu_timeout_peek_expired(&ch_worker->timeout)) {
|
||||||
nvgpu_channel_poll_wdt(g);
|
nvgpu_channel_poll_wdt(g);
|
||||||
ret = nvgpu_timeout_init(g, &ch_worker->timeout,
|
nvgpu_timeout_init_cpu_timer(g, &ch_worker->timeout,
|
||||||
ch_worker->watchdog_interval,
|
ch_worker->watchdog_interval);
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (ret != 0) {
|
|
||||||
nvgpu_err(g, "timeout_init failed: %d", ret);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -339,7 +339,7 @@ int nvgpu_engine_wait_for_idle(struct gk20a *g)
|
|||||||
{
|
{
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
int ret = 0, err = 0;
|
int ret = 0;
|
||||||
u32 i, host_num_engines;
|
u32 i, host_num_engines;
|
||||||
struct nvgpu_engine_status_info engine_status;
|
struct nvgpu_engine_status_info engine_status;
|
||||||
|
|
||||||
@@ -348,11 +348,7 @@ int nvgpu_engine_wait_for_idle(struct gk20a *g)
|
|||||||
host_num_engines =
|
host_num_engines =
|
||||||
nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
|
nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES);
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
return -EINVAL;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0; i < host_num_engines; i++) {
|
for (i = 0; i < host_num_engines; i++) {
|
||||||
if (!nvgpu_engine_check_valid_id(g, i)) {
|
if (!nvgpu_engine_check_valid_id(g, i)) {
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -91,15 +91,8 @@ static void nvgpu_channel_wdt_init(struct nvgpu_channel_wdt *wdt,
|
|||||||
struct nvgpu_channel_wdt_state *state)
|
struct nvgpu_channel_wdt_state *state)
|
||||||
{
|
{
|
||||||
struct gk20a *g = wdt->g;
|
struct gk20a *g = wdt->g;
|
||||||
int ret;
|
|
||||||
|
|
||||||
ret = nvgpu_timeout_init(g, &wdt->timer,
|
nvgpu_timeout_init_cpu_timer(g, &wdt->timer, wdt->limit_ms);
|
||||||
wdt->limit_ms,
|
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (ret != 0) {
|
|
||||||
nvgpu_err(g, "timeout_init failed: %d", ret);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
wdt->ch_state = *state;
|
wdt->ch_state = *state;
|
||||||
wdt->running = true;
|
wdt->running = true;
|
||||||
|
|||||||
@@ -79,10 +79,7 @@ static int gsp_write_cmd(struct nvgpu_gsp *gsp,
|
|||||||
|
|
||||||
nvgpu_log_fn(g, " ");
|
nvgpu_log_fn(g, " ");
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
|
||||||
if (err != 0) {
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
err = nvgpu_gsp_queue_push(gsp->queues, queue_id, gsp->gsp_flcn,
|
err = nvgpu_gsp_queue_push(gsp->queues, queue_id, gsp->gsp_flcn,
|
||||||
|
|||||||
@@ -217,7 +217,7 @@ int nvgpu_gsp_wait_message_cond(struct nvgpu_gsp *gsp, u32 timeout_ms,
|
|||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
|
|
||||||
nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if (*(u8 *)var == val) {
|
if (*(u8 *)var == val) {
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -41,16 +41,12 @@
|
|||||||
void nvgpu_vidmem_destroy(struct gk20a *g)
|
void nvgpu_vidmem_destroy(struct gk20a *g)
|
||||||
{
|
{
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err;
|
|
||||||
|
|
||||||
if (g->ops.fb.get_vidmem_size == NULL) {
|
if (g->ops.fb.get_vidmem_size == NULL) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER);
|
nvgpu_timeout_init_retry(g, &timeout, 100);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Ensure that the thread runs one last time to flush anything in the
|
* Ensure that the thread runs one last time to flush anything in the
|
||||||
@@ -98,13 +94,7 @@ static int nvgpu_vidmem_clear_fence_wait(struct gk20a *g,
|
|||||||
bool done;
|
bool done;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout,
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
nvgpu_get_poll_timeout(g),
|
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
err = nvgpu_fence_wait(g, fence_out,
|
err = nvgpu_fence_wait(g, fence_out,
|
||||||
|
|||||||
@@ -1636,12 +1636,7 @@ static int nvgpu_vm_unmap_sync_buffer(struct vm_gk20a *vm,
|
|||||||
/*
|
/*
|
||||||
* 100ms timer.
|
* 100ms timer.
|
||||||
*/
|
*/
|
||||||
ret = nvgpu_timeout_init(vm->mm->g, &timeout, 100,
|
nvgpu_timeout_init_cpu_timer(vm->mm->g, &timeout, 100);
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (ret != 0) {
|
|
||||||
nvgpu_err(vm->mm->g, "timeout_init failed (%d)", ret);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
nvgpu_mutex_release(&vm->update_gmmu_lock);
|
nvgpu_mutex_release(&vm->update_gmmu_lock);
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -98,12 +98,7 @@ int nvgpu_nvlink_minion_load(struct gk20a *g)
|
|||||||
goto exit;
|
goto exit;
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "Minion boot timeout init failed");
|
|
||||||
goto exit;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
err = g->ops.nvlink.minion.is_boot_complete(g, &boot_cmplte);
|
err = g->ops.nvlink.minion.is_boot_complete(g, &boot_cmplte);
|
||||||
|
|||||||
@@ -163,11 +163,7 @@ int nvgpu_perfbuf_update_get_put(struct gk20a *g, u64 bytes_consumed,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (update_available_bytes && wait && available_bytes_va != NULL) {
|
if (update_available_bytes && wait && available_bytes_va != NULL) {
|
||||||
err = nvgpu_timeout_init(g, &timeout, 10000, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &timeout, 10000);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if (*available_bytes_va != 0xffffffff) {
|
if (*available_bytes_va != 0xffffffff) {
|
||||||
|
|||||||
@@ -108,15 +108,9 @@ int nvgpu_pmu_wait_fw_ack_status(struct gk20a *g, struct nvgpu_pmu *pmu,
|
|||||||
u32 timeout_ms, void *var, u8 val)
|
u32 timeout_ms, void *var, u8 val)
|
||||||
{
|
{
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err;
|
|
||||||
unsigned int delay = POLL_DELAY_MIN_US;
|
unsigned int delay = POLL_DELAY_MIN_US;
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, timeout_ms,
|
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "PMU wait timeout init failed.");
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
nvgpu_rmb();
|
nvgpu_rmb();
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -158,11 +158,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
|
|||||||
|
|
||||||
nvgpu_log_fn(g, " ");
|
nvgpu_log_fn(g, " ");
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, U32_MAX, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &timeout, U32_MAX);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "failed to init timer");
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
err = nvgpu_pmu_queue_push(&pmu->queues, pmu->flcn,
|
err = nvgpu_pmu_queue_push(&pmu->queues, pmu->flcn,
|
||||||
|
|||||||
@@ -788,7 +788,6 @@ static void pmu_pg_kill_task(struct gk20a *g, struct nvgpu_pmu *pmu,
|
|||||||
struct nvgpu_pmu_pg *pg)
|
struct nvgpu_pmu_pg *pg)
|
||||||
{
|
{
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err = 0;
|
|
||||||
|
|
||||||
/* make sure the pending operations are finished before we continue */
|
/* make sure the pending operations are finished before we continue */
|
||||||
if (nvgpu_thread_is_running(&pg->pg_init.state_task)) {
|
if (nvgpu_thread_is_running(&pg->pg_init.state_task)) {
|
||||||
@@ -800,12 +799,7 @@ static void pmu_pg_kill_task(struct gk20a *g, struct nvgpu_pmu *pmu,
|
|||||||
nvgpu_thread_stop(&pg->pg_init.state_task);
|
nvgpu_thread_stop(&pg->pg_init.state_task);
|
||||||
|
|
||||||
/* wait to confirm thread stopped */
|
/* wait to confirm thread stopped */
|
||||||
err = nvgpu_timeout_init(g, &timeout, 1000,
|
nvgpu_timeout_init_retry(g, &timeout, 1000);
|
||||||
NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "timeout_init failed err=%d", err);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
do {
|
do {
|
||||||
if (!nvgpu_thread_is_running(&pg->pg_init.state_task)) {
|
if (!nvgpu_thread_is_running(&pg->pg_init.state_task)) {
|
||||||
break;
|
break;
|
||||||
|
|||||||
@@ -332,14 +332,11 @@ static int nvgpu_pmu_wait_for_priv_lockdown_release(struct gk20a *g,
|
|||||||
struct nvgpu_falcon *flcn, unsigned int timeout)
|
struct nvgpu_falcon *flcn, unsigned int timeout)
|
||||||
{
|
{
|
||||||
struct nvgpu_timeout to;
|
struct nvgpu_timeout to;
|
||||||
int status;
|
int status = 0;
|
||||||
|
|
||||||
nvgpu_log_fn(g, " ");
|
nvgpu_log_fn(g, " ");
|
||||||
|
|
||||||
status = nvgpu_timeout_init(g, &to, timeout, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &to, timeout);
|
||||||
if (status != 0) {
|
|
||||||
return status;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* poll for priv lockdown release */
|
/* poll for priv lockdown release */
|
||||||
do {
|
do {
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -72,7 +72,7 @@ static int sec2_write_cmd(struct nvgpu_sec2 *sec2,
|
|||||||
|
|
||||||
nvgpu_log_fn(g, " ");
|
nvgpu_log_fn(g, " ");
|
||||||
|
|
||||||
nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
|
||||||
|
|
||||||
do {
|
do {
|
||||||
err = nvgpu_sec2_queue_push(sec2->queues, queue_id, &sec2->flcn,
|
err = nvgpu_sec2_queue_push(sec2->queues, queue_id, &sec2->flcn,
|
||||||
|
|||||||
@@ -258,7 +258,7 @@ int nvgpu_sec2_wait_message_cond(struct nvgpu_sec2 *sec2, u32 timeout_ms,
|
|||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
|
|
||||||
nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if (*(u8 *)var == val) {
|
if (*(u8 *)var == val) {
|
||||||
|
|||||||
@@ -853,13 +853,8 @@ u32 nvgpu_bios_read_u32(struct gk20a *g, u32 offset)
|
|||||||
bool nvgpu_bios_wait_for_init_done(struct gk20a *g)
|
bool nvgpu_bios_wait_for_init_done(struct gk20a *g)
|
||||||
{
|
{
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err;
|
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout,
|
nvgpu_timeout_init_cpu_timer(g, &timeout, NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS);
|
||||||
NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Wait till vbios is completed */
|
/* Wait till vbios is completed */
|
||||||
do {
|
do {
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -85,14 +85,9 @@ int gv100_bios_preos_wait_for_halt(struct gk20a *g)
|
|||||||
g->ops.bus.write_sw_scratch(g, SCRATCH_PMU_EXIT_AND_HALT,
|
g->ops.bus.write_sw_scratch(g, SCRATCH_PMU_EXIT_AND_HALT,
|
||||||
PMU_EXIT_AND_HALT_SET(tmp, PMU_EXIT_AND_HALT_YES));
|
PMU_EXIT_AND_HALT_SET(tmp, PMU_EXIT_AND_HALT_YES));
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout,
|
nvgpu_timeout_init_retry(g, &timeout,
|
||||||
PMU_BOOT_TIMEOUT_MAX /
|
PMU_BOOT_TIMEOUT_MAX /
|
||||||
PMU_BOOT_TIMEOUT_DEFAULT,
|
PMU_BOOT_TIMEOUT_DEFAULT);
|
||||||
NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "NVGPU timeout init failed");
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
progress = g->ops.bus.read_sw_scratch(g,
|
progress = g->ops.bus.read_sw_scratch(g,
|
||||||
@@ -180,14 +175,10 @@ int gv100_bios_devinit(struct gk20a *g)
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout,
|
nvgpu_timeout_init_retry(g, &timeout,
|
||||||
PMU_BOOT_TIMEOUT_MAX /
|
PMU_BOOT_TIMEOUT_MAX /
|
||||||
PMU_BOOT_TIMEOUT_DEFAULT,
|
PMU_BOOT_TIMEOUT_DEFAULT);
|
||||||
NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu timeout init failed %d", err);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
do {
|
do {
|
||||||
top_scratch1_reg = g->ops.top.read_top_scratch1_reg(g);
|
top_scratch1_reg = g->ops.top.read_top_scratch1_reg(g);
|
||||||
devinit_completed = ((g->ops.falcon.is_falcon_cpu_halted(
|
devinit_completed = ((g->ops.falcon.is_falcon_cpu_halted(
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -119,13 +119,8 @@ int tu104_bios_verify_devinit(struct gk20a *g)
|
|||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
u32 val;
|
u32 val;
|
||||||
u32 aon_secure_scratch_reg;
|
u32 aon_secure_scratch_reg;
|
||||||
int err;
|
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout,
|
nvgpu_timeout_init_cpu_timer(g, &timeout, NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS);
|
||||||
NVGPU_BIOS_DEVINIT_VERIFY_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
aon_secure_scratch_reg = g->ops.bios.get_aon_secure_scratch_reg(g, 0);
|
aon_secure_scratch_reg = g->ops.bios.get_aon_secure_scratch_reg(g, 0);
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* GM20B MMU
|
* GM20B MMU
|
||||||
*
|
*
|
||||||
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -50,11 +50,7 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)
|
|||||||
bus_bar1_block_target_vid_mem_f()) |
|
bus_bar1_block_target_vid_mem_f()) |
|
||||||
bus_bar1_block_mode_virtual_f() |
|
bus_bar1_block_mode_virtual_f() |
|
||||||
bus_bar1_block_ptr_f(ptr_v));
|
bus_bar1_block_ptr_f(ptr_v));
|
||||||
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
|
nvgpu_timeout_init_retry(g, &timeout, 1000);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
u32 val = gk20a_readl(g, bus_bind_status_r());
|
u32 val = gk20a_readl(g, bus_bind_status_r());
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2014-2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -47,11 +47,7 @@ int gp10b_bus_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
|
|||||||
bus_bar2_block_target_vid_mem_f()) |
|
bus_bar2_block_target_vid_mem_f()) |
|
||||||
bus_bar2_block_mode_virtual_f() |
|
bus_bar2_block_mode_virtual_f() |
|
||||||
bus_bar2_block_ptr_f(ptr_v));
|
bus_bar2_block_ptr_f(ptr_v));
|
||||||
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
|
nvgpu_timeout_init_retry(g, &timeout, 1000);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
u32 val = gk20a_readl(g, bus_bind_status_r());
|
u32 val = gk20a_readl(g, bus_bind_status_r());
|
||||||
|
|||||||
@@ -66,10 +66,7 @@ int bus_tu104_bar2_bind(struct gk20a *g, struct nvgpu_mem *bar2_inst)
|
|||||||
|
|
||||||
nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x", ptr_v);
|
nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x", ptr_v);
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
|
nvgpu_timeout_init_retry(g, &timeout, 1000);
|
||||||
if (err != 0) {
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
nvgpu_func_writel(g, func_priv_bar2_block_r(),
|
nvgpu_func_writel(g, func_priv_bar2_block_r(),
|
||||||
nvgpu_aperture_mask(g, bar2_inst,
|
nvgpu_aperture_mask(g, bar2_inst,
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* GM20B CBC
|
* GM20B CBC
|
||||||
*
|
*
|
||||||
* Copyright (c) 2019-2020 NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -180,8 +180,7 @@ int gm20b_cbc_ctrl(struct gk20a *g, enum nvgpu_cbc_op op,
|
|||||||
ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
|
ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
|
||||||
ltc * ltc_stride + slice * lts_stride;
|
ltc * ltc_stride + slice * lts_stride;
|
||||||
|
|
||||||
nvgpu_timeout_init(g, &timeout, 2000,
|
nvgpu_timeout_init_retry(g, &timeout, 2000);
|
||||||
NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
do {
|
do {
|
||||||
val = gk20a_readl(g, ctrl1);
|
val = gk20a_readl(g, ctrl1);
|
||||||
if ((val & hw_op) == 0U) {
|
if ((val & hw_op) == 0U) {
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
* GP10B CBC
|
* GP10B CBC
|
||||||
*
|
*
|
||||||
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -180,8 +180,7 @@ int gp10b_cbc_ctrl(struct gk20a *g, enum nvgpu_cbc_op op,
|
|||||||
ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
|
ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
|
||||||
ltc * ltc_stride + slice * lts_stride;
|
ltc * ltc_stride + slice * lts_stride;
|
||||||
|
|
||||||
nvgpu_timeout_init(g, &timeout, 2000,
|
nvgpu_timeout_init_retry(g, &timeout, 2000);
|
||||||
NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
do {
|
do {
|
||||||
val = gk20a_readl(g, ctrl1);
|
val = gk20a_readl(g, ctrl1);
|
||||||
if ((val & hw_op) == 0U) {
|
if ((val & hw_op) == 0U) {
|
||||||
|
|||||||
@@ -179,8 +179,7 @@ int tu104_cbc_ctrl(struct gk20a *g, enum nvgpu_cbc_op op,
|
|||||||
ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
|
ctrl1 = ltc_ltc0_lts0_cbc_ctrl1_r() +
|
||||||
ltc * ltc_stride + slice * lts_stride;
|
ltc * ltc_stride + slice * lts_stride;
|
||||||
|
|
||||||
nvgpu_timeout_init(g, &timeout, 2000,
|
nvgpu_timeout_init_retry(g, &timeout, 2000);
|
||||||
NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
do {
|
do {
|
||||||
val = nvgpu_readl(g, ctrl1);
|
val = nvgpu_readl(g, ctrl1);
|
||||||
if ((val & hw_op) == 0U) {
|
if ((val & hw_op) == 0U) {
|
||||||
|
|||||||
@@ -268,15 +268,10 @@ void ga10b_fb_dump_vpr_info(struct gk20a *g)
|
|||||||
static int ga10b_fb_vpr_mode_fetch_poll(struct gk20a *g, unsigned int poll_ms)
|
static int ga10b_fb_vpr_mode_fetch_poll(struct gk20a *g, unsigned int poll_ms)
|
||||||
{
|
{
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err = 0;
|
|
||||||
u32 val = 0U;
|
u32 val = 0U;
|
||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, poll_ms, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &timeout, poll_ms);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
val = nvgpu_readl(g, fb_mmu_vpr_mode_r());
|
val = nvgpu_readl(g, fb_mmu_vpr_mode_r());
|
||||||
|
|||||||
@@ -100,12 +100,7 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
|
|||||||
trace_gk20a_mm_tlb_invalidate(g->name);
|
trace_gk20a_mm_tlb_invalidate(g->name);
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
|
nvgpu_timeout_init_retry(g, &timeout, 1000);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init(mmu fifo space) failed err=%d",
|
|
||||||
err);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
data = gk20a_readl(g, fb_mmu_ctrl_r());
|
data = gk20a_readl(g, fb_mmu_ctrl_r());
|
||||||
@@ -121,12 +116,7 @@ int gm20b_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
|
nvgpu_timeout_init_retry(g, &timeout, 1000);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init(mmu invalidate) failed err=%d",
|
|
||||||
err);
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
gk20a_writel(g, fb_mmu_invalidate_pdb_r(),
|
gk20a_writel(g, fb_mmu_invalidate_pdb_r(),
|
||||||
fb_mmu_invalidate_pdb_addr_f(addr_lo) |
|
fb_mmu_invalidate_pdb_addr_f(addr_lo) |
|
||||||
@@ -228,13 +218,8 @@ static int gm20b_fb_vpr_info_fetch_wait(struct gk20a *g,
|
|||||||
unsigned int msec)
|
unsigned int msec)
|
||||||
{
|
{
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err = 0;
|
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, msec, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &timeout, msec);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
u32 val;
|
u32 val;
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -170,14 +170,8 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
|
|||||||
} else {
|
} else {
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
int err;
|
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
reg_val &= (~(fb_mmu_fault_buffer_size_enable_m()));
|
reg_val &= (~(fb_mmu_fault_buffer_size_enable_m()));
|
||||||
g->ops.fb.write_mmu_fault_buffer_size(g, index, reg_val);
|
g->ops.fb.write_mmu_fault_buffer_size(g, index, reg_val);
|
||||||
@@ -695,12 +689,8 @@ int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
|
|||||||
|
|
||||||
nvgpu_writel(g, fb_mmu_invalidate_r(), reg_val);
|
nvgpu_writel(g, fb_mmu_invalidate_r(), reg_val);
|
||||||
|
|
||||||
/* retry 200 times */
|
nvgpu_timeout_init_retry(g, &timeout, 200U);
|
||||||
err = nvgpu_timeout_init(g, &timeout, 200U, NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init() failed err=%d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
do {
|
do {
|
||||||
reg_val = nvgpu_readl(g, fb_mmu_ctrl_r());
|
reg_val = nvgpu_readl(g, fb_mmu_ctrl_r());
|
||||||
if (fb_mmu_ctrl_pri_fifo_empty_v(reg_val) !=
|
if (fb_mmu_ctrl_pri_fifo_empty_v(reg_val) !=
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -208,11 +208,7 @@ int tu104_fb_mmu_invalidate_replay(struct gk20a *g,
|
|||||||
|
|
||||||
nvgpu_log_fn(g, " ");
|
nvgpu_log_fn(g, " ");
|
||||||
|
|
||||||
/* retry 200 times */
|
nvgpu_timeout_init_retry(g, &timeout, 200U);
|
||||||
err = nvgpu_timeout_init(g, &timeout, 200U, NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
nvgpu_mutex_acquire(&g->mm.tlb_lock);
|
nvgpu_mutex_acquire(&g->mm.tlb_lock);
|
||||||
|
|
||||||
|
|||||||
@@ -62,10 +62,7 @@ int fb_tu104_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
|
|||||||
|
|
||||||
addr_lo = u64_lo32(nvgpu_mem_get_addr(g, pdb) >> 12);
|
addr_lo = u64_lo32(nvgpu_mem_get_addr(g, pdb) >> 12);
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
|
nvgpu_timeout_init_retry(g, &timeout, 1000);
|
||||||
if (err != 0) {
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
nvgpu_mutex_acquire(&g->mm.tlb_lock);
|
nvgpu_mutex_acquire(&g->mm.tlb_lock);
|
||||||
|
|
||||||
@@ -170,12 +167,8 @@ static int tu104_fb_wait_mmu_bind(struct gk20a *g)
|
|||||||
{
|
{
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
u32 val;
|
u32 val;
|
||||||
int err;
|
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
|
nvgpu_timeout_init_retry(g, &timeout, 1000);
|
||||||
if (err != 0) {
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
val = nvgpu_readl(g, fb_mmu_bind_r());
|
val = nvgpu_readl(g, fb_mmu_bind_r());
|
||||||
|
|||||||
@@ -214,7 +214,6 @@ int ga10b_fb_vab_dump_and_clear(struct gk20a *g, u64 *user_buf,
|
|||||||
* poll NV_PFB_PRI_MMU_VIDMEM_ACCESS_BIT_DUMP_TRIGGER to be cleared
|
* poll NV_PFB_PRI_MMU_VIDMEM_ACCESS_BIT_DUMP_TRIGGER to be cleared
|
||||||
* clear what? buffer or access bits or buffer_put_ptr
|
* clear what? buffer or access bits or buffer_put_ptr
|
||||||
*/
|
*/
|
||||||
int err;
|
|
||||||
struct nvgpu_mem *vab_buf = &g->mm.vab.buffer;
|
struct nvgpu_mem *vab_buf = &g->mm.vab.buffer;
|
||||||
u64 buffer_offset = 0ULL;
|
u64 buffer_offset = 0ULL;
|
||||||
u64 req_buf_size = 0U;
|
u64 req_buf_size = 0U;
|
||||||
@@ -267,11 +266,7 @@ int ga10b_fb_vab_dump_and_clear(struct gk20a *g, u64 *user_buf,
|
|||||||
vab_dump_reg = nvgpu_readl(g, fb_mmu_vidmem_access_bit_dump_r());
|
vab_dump_reg = nvgpu_readl(g, fb_mmu_vidmem_access_bit_dump_r());
|
||||||
nvgpu_log(g, gpu_dbg_vab, "vab_dump_reg 0x%x", vab_dump_reg);
|
nvgpu_log(g, gpu_dbg_vab, "vab_dump_reg 0x%x", vab_dump_reg);
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, 1000U, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &timeout, 1000U);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "Timeout init failed");
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Check if trigger is cleared vab bits collection complete */
|
/* Check if trigger is cleared vab bits collection complete */
|
||||||
do {
|
do {
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved.
|
* Copyright (c) 2014-2021, NVIDIA CORPORATION. All rights reserved.
|
||||||
*
|
*
|
||||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||||
* copy of this software and associated documentation files (the "Software"),
|
* copy of this software and associated documentation files (the "Software"),
|
||||||
@@ -105,11 +105,7 @@ void gm20b_fifo_trigger_mmu_fault(struct gk20a *g,
|
|||||||
fifo_trigger_mmu_fault_enable_f(1U));
|
fifo_trigger_mmu_fault_enable_f(1U));
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (ret != 0) {
|
|
||||||
nvgpu_err(g, "timeout init failed err=%d", ret);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Wait for MMU fault to trigger */
|
/* Wait for MMU fault to trigger */
|
||||||
ret = -EBUSY;
|
ret = -EBUSY;
|
||||||
|
|||||||
@@ -72,12 +72,7 @@ int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id,
|
|||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = nvgpu_timeout_init(g, &timeout, nvgpu_preempt_get_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_preempt_get_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (ret != 0) {
|
|
||||||
nvgpu_err(g, "timeout_init failed: %d", ret);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = -EBUSY;
|
ret = -EBUSY;
|
||||||
do {
|
do {
|
||||||
|
|||||||
@@ -102,14 +102,7 @@ int gv11b_fifo_preempt_poll_pbdma(struct gk20a *g, u32 tsgid,
|
|||||||
unsigned int loop_count = 0;
|
unsigned int loop_count = 0;
|
||||||
struct nvgpu_pbdma_status_info pbdma_status;
|
struct nvgpu_pbdma_status_info pbdma_status;
|
||||||
|
|
||||||
/* timeout in milli seconds */
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_preempt_get_timeout(g));
|
||||||
ret = nvgpu_timeout_init(g, &timeout,
|
|
||||||
nvgpu_preempt_get_timeout(g),
|
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (ret != 0) {
|
|
||||||
nvgpu_err(g, "timeout_init failed: %d", ret);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Default return value */
|
/* Default return value */
|
||||||
ret = -EBUSY;
|
ret = -EBUSY;
|
||||||
@@ -231,14 +224,7 @@ static int gv11b_fifo_preempt_poll_eng(struct gk20a *g, u32 id,
|
|||||||
u32 eng_intr_pending;
|
u32 eng_intr_pending;
|
||||||
struct nvgpu_engine_status_info engine_status;
|
struct nvgpu_engine_status_info engine_status;
|
||||||
|
|
||||||
/* timeout in milli seconds */
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_preempt_get_timeout(g));
|
||||||
ret = nvgpu_timeout_init(g, &timeout,
|
|
||||||
nvgpu_preempt_get_timeout(g),
|
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (ret != 0) {
|
|
||||||
nvgpu_err(g, "timeout_init failed: %d", ret);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Default return value */
|
/* Default return value */
|
||||||
ret = -EBUSY;
|
ret = -EBUSY;
|
||||||
|
|||||||
@@ -89,11 +89,7 @@ int ga10b_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
|
|||||||
|
|
||||||
runlist = g->fifo.runlists[runlist_id];
|
runlist = g->fifo.runlists[runlist_id];
|
||||||
|
|
||||||
ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (ret != 0) {
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = -ETIMEDOUT;
|
ret = -ETIMEDOUT;
|
||||||
do {
|
do {
|
||||||
|
|||||||
@@ -71,12 +71,7 @@ int gk20a_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
|
|||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (ret != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", ret);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = -ETIMEDOUT;
|
ret = -ETIMEDOUT;
|
||||||
do {
|
do {
|
||||||
|
|||||||
@@ -70,11 +70,7 @@ int tu104_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
|
|||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (ret != 0) {
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
ret = -ETIMEDOUT;
|
ret = -ETIMEDOUT;
|
||||||
do {
|
do {
|
||||||
|
|||||||
@@ -190,20 +190,14 @@ NVGPU_COV_WHITELIST_BLOCK_END(NVGPU_MISRA(Rule, 15_6))
|
|||||||
int gm20b_gr_falcon_wait_mem_scrubbing(struct gk20a *g)
|
int gm20b_gr_falcon_wait_mem_scrubbing(struct gk20a *g)
|
||||||
{
|
{
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err;
|
|
||||||
bool fecs_scrubbing;
|
bool fecs_scrubbing;
|
||||||
bool gpccs_scrubbing;
|
bool gpccs_scrubbing;
|
||||||
|
|
||||||
nvgpu_log_fn(g, " ");
|
nvgpu_log_fn(g, " ");
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout,
|
nvgpu_timeout_init_retry(g, &timeout,
|
||||||
CTXSW_MEM_SCRUBBING_TIMEOUT_MAX_US /
|
CTXSW_MEM_SCRUBBING_TIMEOUT_MAX_US /
|
||||||
CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT_US,
|
CTXSW_MEM_SCRUBBING_TIMEOUT_DEFAULT_US);
|
||||||
NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "ctxsw mem scrub timeout_init failed: %d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
fecs_scrubbing = (nvgpu_readl(g, gr_fecs_dmactl_r()) &
|
fecs_scrubbing = (nvgpu_readl(g, gr_fecs_dmactl_r()) &
|
||||||
@@ -433,12 +427,7 @@ static int gm20b_gr_falcon_ctx_wait_ucode(struct gk20a *g, u32 mailbox_id,
|
|||||||
delay = POLL_DELAY_MIN_US;
|
delay = POLL_DELAY_MIN_US;
|
||||||
}
|
}
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "ctxsw wait ucode timeout_init failed: %d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
while (check == WAIT_UCODE_LOOP) {
|
while (check == WAIT_UCODE_LOOP) {
|
||||||
if (nvgpu_timeout_expired(&timeout) != 0) {
|
if (nvgpu_timeout_expired(&timeout) != 0) {
|
||||||
|
|||||||
@@ -1683,8 +1683,7 @@ int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
|
|||||||
nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
|
nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
|
||||||
"GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm);
|
"GPC%d TPC%d SM%d: locking down SM", gpc, tpc, sm);
|
||||||
|
|
||||||
nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
|
|
||||||
/* wait for the sm to lock down */
|
/* wait for the sm to lock down */
|
||||||
do {
|
do {
|
||||||
|
|||||||
@@ -726,8 +726,7 @@ int gr_gp10b_suspend_contexts(struct gk20a *g,
|
|||||||
|
|
||||||
gr_ctx = tsg->gr_ctx;
|
gr_ctx = tsg->gr_ctx;
|
||||||
|
|
||||||
nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
do {
|
do {
|
||||||
if (!nvgpu_gr_ctx_get_cilp_preempt_pending(gr_ctx)) {
|
if (!nvgpu_gr_ctx_get_cilp_preempt_pending(gr_ctx)) {
|
||||||
break;
|
break;
|
||||||
|
|||||||
@@ -1256,7 +1256,6 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
|
|||||||
u32 dbgr_status0 = 0;
|
u32 dbgr_status0 = 0;
|
||||||
u32 warp_esr, global_esr;
|
u32 warp_esr, global_esr;
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err;
|
|
||||||
u32 offset = nvgpu_gr_gpc_offset(g, gpc) +
|
u32 offset = nvgpu_gr_gpc_offset(g, gpc) +
|
||||||
nvgpu_gr_tpc_offset(g, tpc) +
|
nvgpu_gr_tpc_offset(g, tpc) +
|
||||||
nvgpu_gr_sm_offset(g, sm);
|
nvgpu_gr_sm_offset(g, sm);
|
||||||
@@ -1264,12 +1263,7 @@ int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
|
|||||||
nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
|
nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
|
||||||
"GPC%d TPC%d: locking down SM%d", gpc, tpc, sm);
|
"GPC%d TPC%d: locking down SM%d", gpc, tpc, sm);
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, g->poll_timeout_default,
|
nvgpu_timeout_init_cpu_timer(g, &timeout, g->poll_timeout_default);
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "timeout_init failed: %d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* wait for the sm to lock down */
|
/* wait for the sm to lock down */
|
||||||
do {
|
do {
|
||||||
|
|||||||
@@ -328,17 +328,12 @@ void ga10b_gr_init_commit_global_timeslice(struct gk20a *g)
|
|||||||
int ga10b_gr_init_wait_idle(struct gk20a *g)
|
int ga10b_gr_init_wait_idle(struct gk20a *g)
|
||||||
{
|
{
|
||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
int err = 0;
|
|
||||||
bool gr_busy;
|
bool gr_busy;
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
|
|
||||||
nvgpu_log(g, gpu_dbg_verbose | gpu_dbg_gr, " ");
|
nvgpu_log(g, gpu_dbg_verbose | gpu_dbg_gr, " ");
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
/*
|
/*
|
||||||
@@ -468,16 +463,10 @@ int ga10b_gr_init_wait_empty(struct gk20a *g)
|
|||||||
u32 gr_status;
|
u32 gr_status;
|
||||||
u32 activity0, activity1, activity4;
|
u32 activity0, activity1, activity4;
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err;
|
|
||||||
|
|
||||||
nvgpu_log_fn(g, " ");
|
nvgpu_log_fn(g, " ");
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "timeout_init failed: %d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
gr_status = nvgpu_readl(g, gr_status_r());
|
gr_status = nvgpu_readl(g, gr_status_r());
|
||||||
|
|||||||
@@ -180,7 +180,6 @@ int gm20b_gr_init_wait_idle(struct gk20a *g)
|
|||||||
{
|
{
|
||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
u32 gr_engine_id;
|
u32 gr_engine_id;
|
||||||
int err = 0;
|
|
||||||
bool ctxsw_active;
|
bool ctxsw_active;
|
||||||
bool gr_busy;
|
bool gr_busy;
|
||||||
bool ctx_status_invalid;
|
bool ctx_status_invalid;
|
||||||
@@ -191,11 +190,7 @@ int gm20b_gr_init_wait_idle(struct gk20a *g)
|
|||||||
|
|
||||||
gr_engine_id = nvgpu_engine_get_gr_id(g);
|
gr_engine_id = nvgpu_engine_get_gr_id(g);
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
/*
|
/*
|
||||||
@@ -236,7 +231,6 @@ int gm20b_gr_init_wait_fe_idle(struct gk20a *g)
|
|||||||
u32 val;
|
u32 val;
|
||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err = 0;
|
|
||||||
|
|
||||||
#ifdef CONFIG_NVGPU_SIM
|
#ifdef CONFIG_NVGPU_SIM
|
||||||
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
|
if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
|
||||||
@@ -246,11 +240,7 @@ int gm20b_gr_init_wait_fe_idle(struct gk20a *g)
|
|||||||
|
|
||||||
nvgpu_log(g, gpu_dbg_verbose, " ");
|
nvgpu_log(g, gpu_dbg_verbose, " ");
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
val = nvgpu_readl(g, gr_status_r());
|
val = nvgpu_readl(g, gr_status_r());
|
||||||
@@ -289,13 +279,9 @@ int gm20b_gr_init_fe_pwr_mode_force_on(struct gk20a *g, bool force_on)
|
|||||||
gr_fe_pwr_mode_mode_auto_f();
|
gr_fe_pwr_mode_mode_auto_f();
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = nvgpu_timeout_init(g, &timeout,
|
nvgpu_timeout_init_retry(g, &timeout,
|
||||||
FE_PWR_MODE_TIMEOUT_MAX_US /
|
FE_PWR_MODE_TIMEOUT_MAX_US /
|
||||||
FE_PWR_MODE_TIMEOUT_DEFAULT_US,
|
FE_PWR_MODE_TIMEOUT_DEFAULT_US);
|
||||||
NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
if (ret != 0) {
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
nvgpu_writel(g, gr_fe_pwr_mode_r(), reg_val);
|
nvgpu_writel(g, gr_fe_pwr_mode_r(), reg_val);
|
||||||
|
|
||||||
|
|||||||
@@ -65,16 +65,10 @@ int gp10b_gr_init_wait_empty(struct gk20a *g)
|
|||||||
u32 gr_status;
|
u32 gr_status;
|
||||||
u32 activity0, activity1, activity2, activity4;
|
u32 activity0, activity1, activity2, activity4;
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err;
|
|
||||||
|
|
||||||
nvgpu_log_fn(g, " ");
|
nvgpu_log_fn(g, " ");
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
|
nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "timeout_init failed: %d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
/* fmodel: host gets fifo_engine_status(gr) from gr
|
/* fmodel: host gets fifo_engine_status(gr) from gr
|
||||||
|
|||||||
@@ -74,19 +74,13 @@ static int gr_gv11b_ecc_scrub_is_done(struct gk20a *g,
|
|||||||
u32 val;
|
u32 val;
|
||||||
u32 gpc, tpc;
|
u32 gpc, tpc;
|
||||||
u32 gpc_offset, tpc_offset;
|
u32 gpc_offset, tpc_offset;
|
||||||
int err;
|
|
||||||
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
|
u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
|
||||||
u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
|
u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
|
||||||
GPU_LIT_TPC_IN_GPC_STRIDE);
|
GPU_LIT_TPC_IN_GPC_STRIDE);
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout,
|
nvgpu_timeout_init_retry(g, &timeout,
|
||||||
(GR_ECC_SCRUBBING_TIMEOUT_MAX_US /
|
(GR_ECC_SCRUBBING_TIMEOUT_MAX_US /
|
||||||
GR_ECC_SCRUBBING_TIMEOUT_DEFAULT_US),
|
GR_ECC_SCRUBBING_TIMEOUT_DEFAULT_US));
|
||||||
NVGPU_TIMER_RETRY_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "timeout_init failed: %d", err);
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (gpc = 0; gpc < nvgpu_gr_config_get_gpc_count(gr_config); gpc++) {
|
for (gpc = 0; gpc < nvgpu_gr_config_get_gpc_count(gr_config); gpc++) {
|
||||||
gpc_offset = nvgpu_safe_mult_u32(gpc_stride, gpc);
|
gpc_offset = nvgpu_safe_mult_u32(gpc_stride, gpc);
|
||||||
|
|||||||
@@ -66,11 +66,7 @@ static int gm20b_ltc_wait_for_clean(struct gk20a *g)
|
|||||||
*
|
*
|
||||||
* So 5ms timeout here should be more than sufficient.
|
* So 5ms timeout here should be more than sufficient.
|
||||||
*/
|
*/
|
||||||
err = nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &timeout, 5);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
|
|
||||||
return -ETIMEDOUT;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
u32 cmgmt1 = nvgpu_safe_add_u32(
|
u32 cmgmt1 = nvgpu_safe_add_u32(
|
||||||
@@ -100,11 +96,7 @@ static int gm20b_ltc_wait_for_invalidate(struct gk20a *g)
|
|||||||
u32 op_pending;
|
u32 op_pending;
|
||||||
|
|
||||||
/* Again, 5ms. */
|
/* Again, 5ms. */
|
||||||
err = nvgpu_timeout_init(g, &timeout, 5, NVGPU_TIMER_CPU_TIMER);
|
nvgpu_timeout_init_cpu_timer(g, &timeout, 5);
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "nvgpu_timeout_init failed err=%d", err);
|
|
||||||
return -ETIMEDOUT;
|
|
||||||
}
|
|
||||||
|
|
||||||
do {
|
do {
|
||||||
u32 cmgmt0 = nvgpu_safe_add_u32(
|
u32 cmgmt0 = nvgpu_safe_add_u32(
|
||||||
|
|||||||
@@ -75,14 +75,8 @@ static int ga10b_mc_poll_device_enable(struct gk20a *g, u32 reg_idx,
|
|||||||
u32 reg_val;
|
u32 reg_val;
|
||||||
u32 delay = POLL_DELAY_MIN_US;
|
u32 delay = POLL_DELAY_MIN_US;
|
||||||
struct nvgpu_timeout timeout;
|
struct nvgpu_timeout timeout;
|
||||||
int err;
|
|
||||||
|
|
||||||
err = nvgpu_timeout_init(g, &timeout, MC_ENGINE_RESET_DELAY_US,
|
nvgpu_timeout_init_cpu_timer(g, &timeout, MC_ENGINE_RESET_DELAY_US);
|
||||||
NVGPU_TIMER_CPU_TIMER);
|
|
||||||
if (err != 0) {
|
|
||||||
nvgpu_err(g, "Timeout init failed");
|
|
||||||
return err;
|
|
||||||
}
|
|
||||||
|
|
||||||
reg_val = nvgpu_readl(g, mc_device_enable_r(reg_idx));
|
reg_val = nvgpu_readl(g, mc_device_enable_r(reg_idx));
|
||||||
|
|
||||||
|
|||||||
5
drivers/gpu/nvgpu/hal/mm/cache/flush_gk20a.c
vendored
5
drivers/gpu/nvgpu/hal/mm/cache/flush_gk20a.c
vendored
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -50,8 +50,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
 		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_CBC_CLEAN);
 	}

-	nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
-					NVGPU_TIMER_RETRY_TIMER) == 0);
+	nvgpu_timeout_init_retry(g, &timeout, retries);

 	nvgpu_mutex_acquire(&mm->l2_op_lock);

@@ -53,8 +53,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_FB);
 	}

-	nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
-					NVGPU_TIMER_RETRY_TIMER) == 0);
+	nvgpu_timeout_init_retry(g, &timeout, retries);

 	nvgpu_mutex_acquire(&mm->l2_op_lock);

@@ -118,8 +117,7 @@ static void gk20a_mm_l2_invalidate_locked(struct gk20a *g)
 		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_INV);
 	}

-	nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
-					NVGPU_TIMER_RETRY_TIMER) == 0);
+	nvgpu_timeout_init_retry(g, &timeout, retries);

 	/* Invalidate any clean lines from the L2 so subsequent reads go to
 	   DRAM. Dirty lines are not affected by this operation. */
@@ -182,8 +180,7 @@ int gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 		retries = g->ops.mm.get_flush_retries(g, NVGPU_FLUSH_L2_FLUSH);
 	}

-	nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
-					NVGPU_TIMER_RETRY_TIMER) == 0);
+	nvgpu_timeout_init_retry(g, &timeout, retries);

 	nvgpu_mutex_acquire(&mm->l2_op_lock);

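These four call sites previously open-coded the assertion that a retry-timer init cannot fail; the new helper centralizes it. The equivalence, side by side (the helper body appears verbatim in the common/timers.c hunk later in this patch):

    /* old: assert at every call site */
    nvgpu_assert(nvgpu_timeout_init(g, &timeout, retries,
                                    NVGPU_TIMER_RETRY_TIMER) == 0);

    /* new: the assert lives inside the helper */
    nvgpu_timeout_init_retry(g, &timeout, retries);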
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -241,12 +241,8 @@ int gv100_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask)
 	/* Poll for links to go up */
 	links_off = (u32) link_mask;

-	err = nvgpu_timeout_init(g, &timeout,
-			NVLINK_PLL_ON_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
-	if (err != 0) {
-		nvgpu_err(g, "PLL ON timeout init failed");
-		return err;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, NVLINK_PLL_ON_TIMEOUT_MS);
 	do {
 		for_each_set_bit(bit, &link_mask, NVLINK_MAX_LINKS_SW) {
 			link_id = (u32)bit;
@@ -303,13 +299,8 @@ static int gv100_nvlink_rxcal_en(struct gk20a *g, unsigned long mask)
 	for_each_set_bit(bit, &mask, NVLINK_MAX_LINKS_SW) {
 		link_id = (u32)bit;
 		/* Timeout from HW specs */
-		ret = nvgpu_timeout_init(g, &timeout,
-				8*NVLINK_SUBLINK_TIMEOUT_MS,
-				NVGPU_TIMER_CPU_TIMER);
-		if (ret != 0) {
-			nvgpu_err(g, "Timeout threshold init failed");
-			return ret;
-		}
+		nvgpu_timeout_init_cpu_timer(g, &timeout,
+				8*NVLINK_SUBLINK_TIMEOUT_MS);
 		reg = DLPL_REG_RD32(g, link_id, nvl_br0_cfg_cal_r());
 		reg = set_field(reg, nvl_br0_cfg_cal_rxcal_m(),
 			nvl_br0_cfg_cal_rxcal_on_f());
@@ -496,14 +487,9 @@ static int gv100_nvlink_link_sublink_check_change(struct gk20a *g, u32 link_id)
 {
 	struct nvgpu_timeout timeout;
 	u32 reg;
-	int err = 0;

-	err = nvgpu_timeout_init(g, &timeout,
-			NVLINK_SUBLINK_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
-	if (err != 0) {
-		nvgpu_err(g, "Sublink mode change timeout init failed");
-		return err;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, NVLINK_SUBLINK_TIMEOUT_MS);
 	/* Poll for sublink status */
 	do {
 		reg = DLPL_REG_RD32(g, link_id, nvl_sublink_change_r());
@@ -524,7 +510,7 @@ static int gv100_nvlink_link_sublink_check_change(struct gk20a *g, u32 link_id)
 	if (nvgpu_timeout_peek_expired(&timeout)) {
 		return -ETIMEDOUT;
 	}
-	return err;
+	return 0;
 }

 int gv100_nvlink_link_set_sublink_mode(struct gk20a *g, u32 link_id,
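Note the return err; to return 0; change: with the local err gone, the function's only remaining failure mode is the timeout itself. The check above uses nvgpu_timeout_peek_expired(), which, as we read the timer API (this patch does not spell it out, so treat it as an assumption), tests expiry without the side effects of the loop guard:

    /* Sketch of the assumed distinction:
     *   nvgpu_timeout_expired(&t)      - loop guard; advances the retry
     *                                    count and may log on expiry
     *   nvgpu_timeout_peek_expired(&t) - side-effect-free status check
     */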
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -48,12 +48,8 @@ int tu104_nvlink_setup_pll(struct gk20a *g, unsigned long link_mask)
 		return ret;
 	}

-	ret = nvgpu_timeout_init(g, &timeout,
-			NV_NVLINK_REG_POLL_TIMEOUT_MS, NVGPU_TIMER_CPU_TIMER);
-	if (ret != 0) {
-		nvgpu_err(g, "Error during timeout init");
-		return ret;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS);
 	do {
 		reg = DLPL_REG_RD32(g, link_id, nvl_clk_status_r());
 		if (nvl_clk_status_txclk_sts_v(reg) ==
@@ -78,14 +74,8 @@ u32 tu104_nvlink_link_get_tx_sublink_state(struct gk20a *g, u32 link_id)
 {
 	u32 reg;
 	struct nvgpu_timeout timeout;
-	int err = 0;

-	err = nvgpu_timeout_init(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS,
-			NVGPU_TIMER_CPU_TIMER);
-	if (err != 0) {
-		nvgpu_err(g, "Failed to init timeout: %d", err);
-		goto result;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS);

 	/* Poll till substate value becomes STABLE */
 	do {
@@ -102,7 +92,6 @@ u32 tu104_nvlink_link_get_tx_sublink_state(struct gk20a *g, u32 link_id)
 			nvl_sl0_slsm_status_tx_primary_state_v(reg),
 			nvl_sl0_slsm_status_tx_substate_v(reg));

-result:
 	return nvl_sl0_slsm_status_tx_primary_state_unknown_v();
 }

@@ -110,14 +99,8 @@ u32 tu104_nvlink_link_get_rx_sublink_state(struct gk20a *g, u32 link_id)
 {
 	u32 reg;
 	struct nvgpu_timeout timeout;
-	int err = 0;

-	err = nvgpu_timeout_init(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS,
-			NVGPU_TIMER_CPU_TIMER);
-	if (err != 0) {
-		nvgpu_err(g, "Failed to init timeout: %d", err);
-		goto result;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS);

 	/* Poll till substate value becomes STABLE */
 	do {
@@ -134,7 +117,6 @@ u32 tu104_nvlink_link_get_rx_sublink_state(struct gk20a *g, u32 link_id)
 			nvl_sl1_slsm_status_rx_primary_state_v(reg),
 			nvl_sl1_slsm_status_rx_substate_v(reg));

-result:
 	return nvl_sl1_slsm_status_rx_primary_state_unknown_v();
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -104,14 +104,8 @@ static int gv100_nvlink_minion_command_complete(struct gk20a *g, u32 link_id)
 	u32 reg;
 	struct nvgpu_timeout timeout;
 	u32 delay = POLL_DELAY_MIN_US;
-	int err = 0;

-	err = nvgpu_timeout_init(g, &timeout, nvgpu_get_poll_timeout(g),
-			NVGPU_TIMER_CPU_TIMER);
-	if (err != 0) {
-		nvgpu_err(g, "Minion cmd complete timeout init failed");
-		return err;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));

 	do {
 		reg = MINION_REG_RD32(g, minion_nvlink_dl_cmd_r(link_id));
@@ -145,7 +139,7 @@ static int gv100_nvlink_minion_command_complete(struct gk20a *g, u32 link_id)
 	}

 	nvgpu_log(g, gpu_dbg_nvlink, "minion cmd Complete");
-	return err;
+	return 0;
 }

 u32 gv100_nvlink_minion_get_dlcmd_ordinal(struct gk20a *g,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -58,12 +58,7 @@ int tu104_nvlink_rxdet(struct gk20a *g, u32 link_id)
 		return ret;
 	}

-	ret = nvgpu_timeout_init(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS,
-			NVGPU_TIMER_CPU_TIMER);
-	if (ret != 0) {
-		nvgpu_err(g, "Error during timeout init");
-		return ret;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, NV_NVLINK_REG_POLL_TIMEOUT_MS);

 	do {
 		reg = DLPL_REG_RD32(g, link_id, nvl_sl0_link_rxdet_status_r());
@@ -665,13 +665,8 @@ int ga10b_perf_wait_for_idle_pma(struct gk20a *g)
 	u32 status, rbufempty_status;
 	u32 timeout_ms = 1;
 	u32 reg_val;
-	int err;

-	err = nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
-	if (err != 0) {
-		nvgpu_err(g, "failed to init timeout");
-		return err;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);

 	do {
 		reg_val = nvgpu_readl(g, perf_pmasys_enginestatus_r());
@@ -535,13 +535,8 @@ static int poll_for_pmm_router_idle(struct gk20a *g, u32 offset, u32 timeout_ms)
 	struct nvgpu_timeout timeout;
 	u32 reg_val;
 	u32 status;
-	int err;

-	err = nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
-	if (err != 0) {
-		nvgpu_err(g, "failed to init timeout");
-		return err;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);

 	do {
 		reg_val = nvgpu_readl(g, offset);
@@ -624,13 +619,8 @@ int gv11b_perf_wait_for_idle_pma(struct gk20a *g)
 	u32 status, rbufempty_status;
 	u32 timeout_ms = 1;
 	u32 reg_val;
-	int err;

-	err = nvgpu_timeout_init(g, &timeout, timeout_ms, NVGPU_TIMER_CPU_TIMER);
-	if (err != 0) {
-		nvgpu_err(g, "failed to init timeout");
-		return err;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, timeout_ms);

 	do {
 		reg_val = nvgpu_readl(g, perf_pmasys_enginestatus_r());
@@ -242,11 +242,7 @@ static int do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 	gk20a_writel(g, xp_dl_mgr_r(0), dl_mgr);
 	xv_sc_dbg(g, DL_SAFE_MODE, " Done!");

-	if (nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
-			       NVGPU_TIMER_CPU_TIMER) != 0) {
-		nvgpu_err(g, "failed to init timeout");
-		goto done;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, GPU_XVE_TIMEOUT_MS);

 	xv_sc_dbg(g, CHECK_LINK, "Checking for link idle...");
 	do {
@@ -324,11 +320,8 @@ static int do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)

 	xv_sc_dbg(g, EXEC_CHANGE, "Running link speed change...");

-	if (nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
-			       NVGPU_TIMER_CPU_TIMER) != 0) {
-		nvgpu_err(g, "failed to init timeout");
-		goto done;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, GPU_XVE_TIMEOUT_MS);
 	do {
 		gk20a_writel(g, xp_pl_link_config_r(0), pl_link_config);
 		if (pl_link_config ==
@@ -360,11 +353,7 @@ static int do_xve_set_speed_gp106(struct gk20a *g, u32 next_link_speed)
 	 * Read NV_XP_PL_LINK_CONFIG until the link has swapped to
 	 * the target speed.
 	 */
-	if (nvgpu_timeout_init(g, &timeout, GPU_XVE_TIMEOUT_MS,
-			       NVGPU_TIMER_CPU_TIMER) != 0) {
-		nvgpu_err(g, "failed to init timeout");
-		goto done;
-	}
+	nvgpu_timeout_init_cpu_timer(g, &timeout, GPU_XVE_TIMEOUT_MS);
 	do {
 		pl_link_config = gk20a_readl(g, xp_pl_link_config_r(0));
 		if (pl_link_config != 0xfffffffU &&
@@ -149,9 +149,36 @@ struct nvgpu_timeout {
  *
  * @retval -EINVAL invalid input parameter.
  */
-int nvgpu_timeout_init(struct gk20a *g, struct nvgpu_timeout *timeout,
+int nvgpu_timeout_init_flags(struct gk20a *g, struct nvgpu_timeout *timeout,
 		       u32 duration, unsigned long flags);

+/**
+ * @brief Initialize a timeout.
+ *
+ * Init a cpu clock based timeout. See nvgpu_timeout_init_flags() and
+ * NVGPU_TIMER_CPU_TIMER for full explanation.
+ *
+ * @param g [in] GPU driver structure.
+ * @param timeout [in] Timeout object to initialize.
+ * @param duration_ms [in] Timeout duration in milliseconds.
+ */
+void nvgpu_timeout_init_cpu_timer(struct gk20a *g, struct nvgpu_timeout *timeout,
+			    u32 duration_ms);
+
+/**
+ * @brief Initialize a timeout.
+ *
+ * Init a retry based timeout. See nvgpu_timeout_init_flags() and
+ * NVGPU_TIMER_RETRY_TIMER for full explanation.
+ *
+ * @param g [in] GPU driver structure.
+ * @param timeout [in] Timeout object to initialize.
+ * @param duration_count [in] Timeout duration in number of retries.
+ */
+void nvgpu_timeout_init_retry(struct gk20a *g, struct nvgpu_timeout *timeout,
+			    u32 duration_count);
+
 /**
  * @brief Check the timeout status.
  *
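Taken together, the header now exposes three entry points. A usage sketch, assuming nothing beyond the prototypes and kernel-doc above (the durations are arbitrary):

    struct nvgpu_timeout to;

    /* Wall-clock deadline: expires roughly 10 ms after init. */
    nvgpu_timeout_init_cpu_timer(g, &to, 10U);

    /* Retry budget: expires after 100 nvgpu_timeout_expired() calls. */
    nvgpu_timeout_init_retry(g, &to, 100U);

    /* Anything needing explicit flag bits keeps the fallible variant,
     * which returns -EINVAL on invalid flags. */
    if (nvgpu_timeout_init_flags(g, &to, 10U, NVGPU_TIMER_CPU_TIMER) != 0) {
        /* handle invalid flags */
    }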
@@ -936,8 +936,7 @@ __acquires(&cde_app->mutex)
 	struct gk20a_cde_ctx *cde_ctx = NULL;
 	struct nvgpu_timeout timeout;

-	nvgpu_timeout_init(g, &timeout, MAX_CTX_RETRY_TIME,
-			   NVGPU_TIMER_CPU_TIMER);
+	nvgpu_timeout_init_cpu_timer(g, &timeout, MAX_CTX_RETRY_TIME);

 	do {
 		cde_ctx = gk20a_cde_do_get_context(l);
@@ -774,8 +774,7 @@ int gk20a_block_new_jobs_and_idle(struct gk20a *g)

 	nvgpu_mutex_acquire(&platform->railgate_lock);

-	nvgpu_timeout_init(g, &timeout, GK20A_WAIT_FOR_IDLE_MS,
-			   NVGPU_TIMER_CPU_TIMER);
+	nvgpu_timeout_init_cpu_timer(g, &timeout, GK20A_WAIT_FOR_IDLE_MS);

 	/* check and wait until GPU is idle (with a timeout) */
 	do {
@@ -1395,8 +1394,7 @@ static int gk20a_pm_suspend(struct device *dev)
 		return ret;
 	}

-	nvgpu_timeout_init(g, &timeout, GK20A_WAIT_FOR_IDLE_MS,
-			   NVGPU_TIMER_CPU_TIMER);
+	nvgpu_timeout_init_cpu_timer(g, &timeout, GK20A_WAIT_FOR_IDLE_MS);
 	/*
 	 * Hold back deterministic submits and changes to deterministic
 	 * channels - this must be outside the power busy locks.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,24 @@ static int nvgpu_timeout_is_pre_silicon(struct nvgpu_timeout *timeout)
 	return !nvgpu_platform_is_silicon(timeout->g);
 }

+void nvgpu_timeout_init_cpu_timer(struct gk20a *g, struct nvgpu_timeout *timeout,
+			    u32 duration_ms)
+{
+	int err = nvgpu_timeout_init_flags(g, timeout, duration_ms,
+					   NVGPU_TIMER_CPU_TIMER);
+
+	nvgpu_assert(err == 0);
+}
+
+void nvgpu_timeout_init_retry(struct gk20a *g, struct nvgpu_timeout *timeout,
+			    u32 duration_count)
+{
+	int err = nvgpu_timeout_init_flags(g, timeout, duration_count,
+					   NVGPU_TIMER_RETRY_TIMER);
+
+	nvgpu_assert(err == 0);
+}
+
 /**
  * nvgpu_timeout_init - Init timer.
  *
@@ -55,7 +73,7 @@ static int nvgpu_timeout_is_pre_silicon(struct nvgpu_timeout *timeout)
  * If neither %NVGPU_TIMER_CPU_TIMER or %NVGPU_TIMER_RETRY_TIMER is passed then
  * a CPU timer is used by default.
  */
-int nvgpu_timeout_init(struct gk20a *g, struct nvgpu_timeout *timeout,
+int nvgpu_timeout_init_flags(struct gk20a *g, struct nvgpu_timeout *timeout,
 		       u32 duration, unsigned long flags)
 {
 	if (flags & ~NVGPU_TIMER_FLAG_MASK)
@@ -122,7 +122,25 @@ static bool time_after(s64 a, s64 b)
 	return (nvgpu_safe_sub_s64(a, b) > 0);
 }

-int nvgpu_timeout_init(struct gk20a *g, struct nvgpu_timeout *timeout,
+void nvgpu_timeout_init_cpu_timer(struct gk20a *g, struct nvgpu_timeout *timeout,
+			    u32 duration_ms)
+{
+	int err = nvgpu_timeout_init_flags(g, timeout, duration_ms,
+					   NVGPU_TIMER_CPU_TIMER);
+
+	nvgpu_assert(err == 0);
+}
+
+void nvgpu_timeout_init_retry(struct gk20a *g, struct nvgpu_timeout *timeout,
+			    u32 duration_count)
+{
+	int err = nvgpu_timeout_init_flags(g, timeout, duration_count,
+					   NVGPU_TIMER_RETRY_TIMER);
+
+	nvgpu_assert(err == 0);
+}
+
+int nvgpu_timeout_init_flags(struct gk20a *g, struct nvgpu_timeout *timeout,
 		       u32 duration, unsigned long flags)
 {
 	s64 duration_ns;
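Call sites that need extra behavior bits keep using nvgpu_timeout_init_flags() directly and must keep the error check, since invalid flag bits still return -EINVAL. A hedged sketch; NVGPU_TIMER_SILENT_TIMEOUT is an existing nvgpu timer flag we assume here, and the retry count is arbitrary:

    struct nvgpu_timeout to;
    int err;

    /* Retry timer that does not log when it expires. */
    err = nvgpu_timeout_init_flags(g, &to, 50U,
            NVGPU_TIMER_RETRY_TIMER | NVGPU_TIMER_SILENT_TIMEOUT);
    if (err != 0) {
        return err; /* only possible for invalid flag bits */
    }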
@@ -682,7 +682,9 @@ nvgpu_rwsem_down_write
 nvgpu_rwsem_up_read
 nvgpu_rwsem_up_write
 nvgpu_timeout_expired_fault_injection
-nvgpu_timeout_init
+nvgpu_timeout_init_cpu_timer
+nvgpu_timeout_init_flags
+nvgpu_timeout_init_retry
 nvgpu_timeout_peek_expired
 nvgpu_timers_get_fault_injection
 nvgpu_tsg_abort
@@ -699,7 +699,9 @@ nvgpu_rwsem_down_write
 nvgpu_rwsem_up_read
 nvgpu_rwsem_up_write
 nvgpu_timeout_expired_fault_injection
-nvgpu_timeout_init
+nvgpu_timeout_init_cpu_timer
+nvgpu_timeout_init_flags
+nvgpu_timeout_init_retry
 nvgpu_timeout_peek_expired
 nvgpu_timers_get_fault_injection
 nvgpu_tsg_store_sm_error_state
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -64,7 +64,7 @@ int test_timer_init(struct unit_module *m,
 		flags = NVGPU_TIMER_CPU_TIMER;
 	}

-	ret = nvgpu_timeout_init(g, &test_timeout,
+	ret = nvgpu_timeout_init_flags(g, &test_timeout,
 				 duration,
 				 flags);

@@ -92,7 +92,7 @@ int test_timer_init_err(struct unit_module *m,
 	memset(&test_timeout, 0, sizeof(struct nvgpu_timeout));
 	/* nvgpu_timeout_init accepts only BIT(0), BIT(8), and BIT(9) as
 	 * valid flag bits. So ret should be EINVAL */
-	ret = nvgpu_timeout_init(g, &test_timeout, 10, (1 << i));
+	ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, (1 << i));

 	if ((i == 0) || (i == 8) || (i == 9)) {
 		if (ret != 0) {
@@ -110,55 +110,55 @@ int test_timer_init_err(struct unit_module *m,
 	}

 	/* BIT(0), BIT(8) and BIT(9) set. Return value should be 0 */
-	ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x301);
+	ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x301);
 	if (ret != 0) {
 		unit_return_fail(m,"Timer init failed with flag 0x301\n");
 	}

 	/* BIT(8) and BIT(9) set. Return value should be 0 */
-	ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x300);
+	ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x300);
 	if (ret != 0) {
 		unit_return_fail(m,"Timer init failed with flag 0x300\n");
 	}

 	/* BIT(0) and BIT(8) set. Return value should be 0 */
-	ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x101);
+	ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x101);
 	if (ret != 0) {
 		unit_return_fail(m,"Timer init failed with flag 0x101\n");
 	}

 	/* BIT(0) and BIT(9) set. Return value should be 0 */
-	ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x201);
+	ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x201);
 	if (ret != 0) {
 		unit_return_fail(m,"Timer init failed with flag 0x201\n");
 	}

 	/* BIT(0), BIT(7) and BIT(9) set. Return value should be -EINVAL */
-	ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x281);
+	ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x281);
 	if (ret != -EINVAL) {
 		unit_return_fail(m,"Timer init failed with flag 0x281\n");
 	}

 	/* BIT(5), BIT(7) and BIT(9) set. Return value should be -EINVAL */
-	ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x2A0);
+	ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x2A0);
 	if (ret != -EINVAL) {
 		unit_return_fail(m,"Timer init failed with flag 0x2A0\n");
 	}

 	/* BIT(1), BIT(2) and BIT(3) set. Return value should be -EINVAL */
-	ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x00E);
+	ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x00E);
 	if (ret != -EINVAL) {
 		unit_return_fail(m,"Timer init failed with flag 0x00E\n");
 	}

 	/* BIT(1) to BIT(7) set. Return value should be -EINVAL */
-	ret = nvgpu_timeout_init(g, &test_timeout, 10, 0x07E);
+	ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0x07E);
 	if (ret != -EINVAL) {
 		unit_return_fail(m,"Timer init failed with flag 0x07E\n");
 	}

 	/* All bits set. Return value should be -EINVAL */
-	ret = nvgpu_timeout_init(g, &test_timeout, 10, 0xFFFFFFFFFFFFFFFF);
+	ret = nvgpu_timeout_init_flags(g, &test_timeout, 10, 0xFFFFFFFFFFFFFFFF);
 	if (ret != -EINVAL) {
 		unit_return_fail(m,"Timer init failed with flag all 1s\n");
 	}
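These repeated flag checks could equally be table-driven. A sketch of the same coverage, not part of the patch; it reuses the unit framework calls visible above and assumes a kernel-style ARRAY_SIZE macro is available:

    static const struct { unsigned long flags; int want; } cases[] = {
        { 0x301, 0 }, { 0x300, 0 }, { 0x101, 0 }, { 0x201, 0 },
        { 0x281, -EINVAL }, { 0x2A0, -EINVAL },
        { 0x00E, -EINVAL }, { 0x07E, -EINVAL },
    };
    size_t i;

    for (i = 0; i < ARRAY_SIZE(cases); i++) {
        ret = nvgpu_timeout_init_flags(g, &test_timeout, 10,
                                       cases[i].flags);
        if (ret != cases[i].want) {
            unit_return_fail(m, "flags 0x%lx: got %d, want %d\n",
                             cases[i].flags, ret, cases[i].want);
        }
    }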
@@ -169,17 +169,9 @@ int test_timer_init_err(struct unit_module *m,
 int test_timer_counter(struct unit_module *m,
 			struct gk20a *g, void *args)
 {
-	int ret;
-
 	memset(&test_timeout, 0, sizeof(struct nvgpu_timeout));

-	ret = nvgpu_timeout_init(g, &test_timeout,
-				 TEST_TIMER_COUNT,
-				 NVGPU_TIMER_RETRY_TIMER);
-	if (ret != 0) {
-		unit_return_fail(m, "Timer init failed %d\n", ret);
-	}
+	nvgpu_timeout_init_retry(g, &test_timeout, TEST_TIMER_COUNT);

 	do {
 		usleep(1);
@@ -200,13 +192,7 @@ int test_timer_duration(struct unit_module *m,

 	memset(&test_timeout, 0, sizeof(struct nvgpu_timeout));

-	ret = nvgpu_timeout_init(g, &test_timeout,
-				 TEST_TIMER_DURATION,
-				 NVGPU_TIMER_CPU_TIMER);
-	if (ret != 0) {
-		unit_return_fail(m, "Timer init failed %d\n", ret);
-	}
+	nvgpu_timeout_init_cpu_timer(g, &test_timeout, TEST_TIMER_DURATION);

 	/*
 	 * Timer should not be expired.
@@ -251,7 +237,7 @@ int test_timer_fault_injection(struct unit_module *m,

 	memset(&test_timeout, 0, sizeof(struct nvgpu_timeout));

-	ret = nvgpu_timeout_init(g, &test_timeout,
+	ret = nvgpu_timeout_init_flags(g, &test_timeout,
 				 TEST_TIMER_DURATION,
 				 NVGPU_TIMER_CPU_TIMER);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2019-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -36,7 +36,7 @@
  *
  * Test Type: Feature
  *
- * Targets: nvgpu_timeout_init
+ * Targets: nvgpu_timeout_init_flags
  *
  * Inputs:
  * 1) The type of timer to be tested is passed as an argument to the test.
@@ -69,7 +69,7 @@ int test_timer_init(struct unit_module *m,
  *
  * Test Type: Boundary values
  *
- * Targets: nvgpu_timeout_init
+ * Targets: nvgpu_timeout_init_flags
 *
 * Inputs:
 * 1) Global nvgpu_timeout structure instance.
@@ -98,7 +98,7 @@ int test_timer_init_err(struct unit_module *m,
 *
 * Test Type: Feature
 *
- * Targets: nvgpu_timeout_init, nvgpu_timeout_expired,
+ * Targets: nvgpu_timeout_init_retry, nvgpu_timeout_expired,
 * nvgpu_timeout_peek_expired
 *
 * Input:
@@ -131,7 +131,7 @@ int test_timer_counter(struct unit_module *m,
 *
 * Test Type: Feature
 *
- * Targets: nvgpu_timeout_init, nvgpu_timeout_expired,
+ * Targets: nvgpu_timeout_init_cpu_timer, nvgpu_timeout_expired,
 * nvgpu_timeout_peek_expired
 *
 * Input: