gpu: nvgpu: use runlist obj for wait_pending
Change the gops_runlist::wait_pending API to take a runlist pointer
instead of a runlist ID to better match the rest of that interface.

Jira NVGPU-6425

Change-Id: I96c4f49df8e2613498e0a09cc75a950824828bed
Signed-off-by: Konsta Hölttä <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2621214
Reviewed-by: svcacv <svcacv@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
commit 6cff904dc3 (parent 9be8fb80a2)
committed by mobile promotions
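In practice the change just moves the ID lookup out of each chip-specific
implementation: callers that already hold a struct nvgpu_runlist pointer pass
it straight through, and only the register accessors still need the numeric
ID. A minimal before/after sketch of a call site, condensed from the hunks
below:

	/* Before: the HAL op took an ID, and each implementation
	 * re-resolved the runlist object from g->fifo.runlists[].
	 */
	ret = g->ops.runlist.wait_pending(g, rl->id);

	/* After: the caller passes the object; implementations read the
	 * ID only where a register offset needs it, e.g.
	 * fifo_eng_runlist_r(rl->id).
	 */
	ret = g->ops.runlist.wait_pending(g, rl);
	if (ret == -ETIMEDOUT) {
		nvgpu_err(g, "runlist %d update timeout", rl->id);
	}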
@@ -448,7 +448,7 @@ int nvgpu_runlist_update_locked(struct gk20a *g, struct nvgpu_runlist *rl,
 	g->ops.runlist.hw_submit(g, rl);
 
 	if (wait_for_finish) {
-		ret = g->ops.runlist.wait_pending(g, rl->id);
+		ret = g->ops.runlist.wait_pending(g, rl);
 
 		if (ret == -ETIMEDOUT) {
 			nvgpu_err(g, "runlist %d update timeout", rl->id);
@@ -501,7 +501,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
 		}
 	}
 
-	if (g->ops.runlist.wait_pending(g, runlist->id) != 0) {
+	if (g->ops.runlist.wait_pending(g, runlist) != 0) {
 		nvgpu_err(g, "wait pending failed for runlist %u",
 				runlist->id);
 	}
@@ -1,7 +1,7 @@
 /*
  * GA10B runlist
  *
- * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -33,7 +33,7 @@ struct nvgpu_runlist;
 u32 ga10b_runlist_count_max(struct gk20a *g);
 u32 ga10b_runlist_length_max(struct gk20a *g);
 void ga10b_runlist_hw_submit(struct gk20a *g, struct nvgpu_runlist *runlist);
-int ga10b_runlist_wait_pending(struct gk20a *g, u32 runlist_id);
+int ga10b_runlist_wait_pending(struct gk20a *g, struct nvgpu_runlist *runlist);
 void ga10b_runlist_write_state(struct gk20a *g, u32 runlists_mask,
 		u32 runlist_state);
 #ifdef CONFIG_NVGPU_CHANNEL_TSG_SCHEDULING
@@ -76,14 +76,11 @@ void ga10b_runlist_hw_submit(struct gk20a *g, struct nvgpu_runlist *runlist)
 			runlist_submit_length_f(runlist->domain->mem_hw->count));
 }
 
-int ga10b_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
+int ga10b_runlist_wait_pending(struct gk20a *g, struct nvgpu_runlist *runlist)
 {
 	struct nvgpu_timeout timeout;
 	u32 delay = POLL_DELAY_MIN_US;
 	int ret;
-	struct nvgpu_runlist *runlist = NULL;
-
-	runlist = g->fifo.runlists[runlist_id];
 
 	nvgpu_timeout_init_cpu_timer(g, &timeout, nvgpu_get_poll_timeout(g));
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -41,7 +41,7 @@ u32 gk20a_runlist_count_max(struct gk20a *g);
 
 u32 gk20a_runlist_length_max(struct gk20a *g);
 void gk20a_runlist_hw_submit(struct gk20a *g, struct nvgpu_runlist *runlist);
-int gk20a_runlist_wait_pending(struct gk20a *g, u32 runlist_id);
+int gk20a_runlist_wait_pending(struct gk20a *g, struct nvgpu_runlist *runlist);
 void gk20a_runlist_write_state(struct gk20a *g, u32 runlists_mask,
 		u32 runlist_state);
 
@@ -60,7 +60,7 @@ void gk20a_runlist_hw_submit(struct gk20a *g, struct nvgpu_runlist *runlist)
 	nvgpu_spinlock_release(&g->fifo.runlist_submit_lock);
 }
 
-int gk20a_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
+int gk20a_runlist_wait_pending(struct gk20a *g, struct nvgpu_runlist *runlist)
 {
 	struct nvgpu_timeout timeout;
 	u32 delay = POLL_DELAY_MIN_US;
@@ -70,7 +70,7 @@ int gk20a_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
 
 	ret = -ETIMEDOUT;
 	do {
-		if ((nvgpu_readl(g, fifo_eng_runlist_r(runlist_id)) &
+		if ((nvgpu_readl(g, fifo_eng_runlist_r(runlist->id)) &
 			fifo_eng_runlist_pending_true_f()) == 0U) {
 			ret = 0;
 			break;
@@ -82,7 +82,7 @@ int gk20a_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
 
 	if (ret != 0) {
 		nvgpu_err(g, "runlist wait timeout: runlist id: %u",
-			runlist_id);
+			runlist->id);
 	}
 
 	return ret;
@@ -61,7 +61,7 @@ void tu104_runlist_hw_submit(struct gk20a *g, struct nvgpu_runlist *runlist)
 		fifo_runlist_submit_length_f(runlist->domain->mem_hw->count));
 }
 
-int tu104_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
+int tu104_runlist_wait_pending(struct gk20a *g, struct nvgpu_runlist *runlist)
 {
 	struct nvgpu_timeout timeout;
 	u32 delay = POLL_DELAY_MIN_US;
@@ -71,7 +71,7 @@ int tu104_runlist_wait_pending(struct gk20a *g, u32 runlist_id)
 
 	ret = -ETIMEDOUT;
 	do {
-		if ((nvgpu_readl(g, fifo_runlist_submit_info_r(runlist_id)) &
+		if ((nvgpu_readl(g, fifo_runlist_submit_info_r(runlist->id)) &
 			fifo_runlist_submit_info_pending_true_f()) == 0U) {
 			ret = 0;
 			break;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -30,6 +30,6 @@ struct nvgpu_runlist;
 
 u32 tu104_runlist_count_max(struct gk20a *g);
 void tu104_runlist_hw_submit(struct gk20a *g, struct nvgpu_runlist *runlist);
-int tu104_runlist_wait_pending(struct gk20a *g, u32 runlist_id);
+int tu104_runlist_wait_pending(struct gk20a *g, struct nvgpu_runlist *runlist);
 
 #endif /* NVGPU_RUNLIST_FIFO_TU104_H */
@@ -93,7 +93,7 @@ struct gops_runlist {
 			u32 *runlist, u32 timeslice);
 	void (*get_ch_entry)(struct nvgpu_channel *ch, u32 *runlist);
 	void (*hw_submit)(struct gk20a *g, struct nvgpu_runlist *runlist);
-	int (*wait_pending)(struct gk20a *g, u32 runlist_id);
+	int (*wait_pending)(struct gk20a *g, struct nvgpu_runlist *runlist);
 	void (*write_state)(struct gk20a *g, u32 runlists_mask,
 			u32 runlist_state);
 	int (*reschedule)(struct nvgpu_channel *ch, bool preempt_next);
@@ -161,6 +161,7 @@ int test_gk20a_runlist_wait_pending(struct unit_module *m,
 	int ret = UNIT_FAIL;
 	struct unit_ctx *ctx = &unit_ctx;
 	u32 runlist_id = nvgpu_engine_get_gr_runlist_id(g);
+	struct nvgpu_runlist *runlist = g->fifo.runlists[runlist_id];
 	u32 timeout = g->poll_timeout_default;
 	int err;
 
@@ -169,28 +170,28 @@ int test_gk20a_runlist_wait_pending(struct unit_module *m,
 	g->poll_timeout_default = 10; /* ms */
 
 	ctx->m = m;
-	ctx->addr = fifo_eng_runlist_r(runlist_id);
+	ctx->addr = fifo_eng_runlist_r(runlist->id);
 	ctx->val_when_count_is_non_zero = fifo_eng_runlist_pending_true_f();
 	ctx->val_when_count_is_zero = 0;
 
 	/* no wait */
 	ctx->count = 0;
-	err = gk20a_runlist_wait_pending(g, runlist_id);
+	err = gk20a_runlist_wait_pending(g, runlist);
 	unit_assert(err == 0, goto done);
 
 	/* 1 loop */
 	ctx->count = 1;
-	err = gk20a_runlist_wait_pending(g, runlist_id);
+	err = gk20a_runlist_wait_pending(g, runlist);
 	unit_assert(err == 0, goto done);
 
 	/* 2 loops */
 	ctx->count = 2;
-	err = gk20a_runlist_wait_pending(g, runlist_id);
+	err = gk20a_runlist_wait_pending(g, runlist);
 	unit_assert(err == 0, goto done);
 
 	/* timeout */
 	ctx->count = U32_MAX;
-	err = gk20a_runlist_wait_pending(g, runlist_id);
+	err = gk20a_runlist_wait_pending(g, runlist);
 	unit_assert(err == -ETIMEDOUT, goto done);
 
 	ret = UNIT_SUCCESS;
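For context, the unit test above drives the poll loop through a mocked
register read that is not part of this diff: it reports the runlist as
pending for the first ctx->count polls of ctx->addr and as idle afterwards,
with U32_MAX meaning "pending forever" to force the timeout path. A plausible
shape of such a mock, with every name hypothetical except the ctx fields set
in the test:

	/* Hypothetical I/O mock: pending until ctx->count reads have
	 * elapsed, then idle; U32_MAX never counts down.
	 */
	static u32 mock_readl(struct gk20a *g, u32 addr)
	{
		struct unit_ctx *ctx = &unit_ctx;

		if (addr != ctx->addr || ctx->count == 0U) {
			return ctx->val_when_count_is_zero;
		}
		if (ctx->count != U32_MAX) {
			ctx->count--;
		}
		return ctx->val_when_count_is_non_zero;
	}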