mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: rename public channel unit APIs
Rename the public channel unit APIs to follow the nvgpu_channel_* naming convention:

gk20a_channel_deterministic_idle   -> nvgpu_channel_deterministic_idle
gk20a_channel_deterministic_unidle -> nvgpu_channel_deterministic_unidle
gk20a_wait_until_counter_is_N      -> nvgpu_channel_wait_until_counter_is_N
nvgpu_gk20a_alloc_job              -> nvgpu_channel_alloc_job

Jira NVGPU-3248

Change-Id: I358d63d4e891f6d92c70efe887c07674bc0f9914
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2123398
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit 0eb0242bdd
parent 3d1169544f
committed by mobile promotions
@@ -224,7 +224,7 @@ void nvgpu_channel_abort(struct nvgpu_channel *ch, bool channel_preempt)
 	}
 }
 
-void gk20a_wait_until_counter_is_N(
+void nvgpu_channel_wait_until_counter_is_N(
 		struct nvgpu_channel *ch, nvgpu_atomic_t *counter, int wait_value,
 		struct nvgpu_cond *c, const char *caller, const char *counter_name)
 {
@@ -307,7 +307,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
 
 	/* wait until there's only our ref to the channel */
 	if (!force) {
-		gk20a_wait_until_counter_is_N(
+		nvgpu_channel_wait_until_counter_is_N(
 			ch, &ch->ref_count, 1, &ch->ref_count_dec_wq,
 			__func__, "references");
 	}
@@ -333,7 +333,7 @@ static void gk20a_free_channel(struct nvgpu_channel *ch, bool force)
 
 	/* wait until no more refs to the channel */
 	if (!force) {
-		gk20a_wait_until_counter_is_N(
+		nvgpu_channel_wait_until_counter_is_N(
 			ch, &ch->ref_count, 0, &ch->ref_count_dec_wq,
 			__func__, "references");
 	}
@@ -899,7 +899,7 @@ void nvgpu_channel_free_priv_cmd_entry(struct nvgpu_channel *c,
 	}
 }
 
-int nvgpu_gk20a_alloc_job(struct nvgpu_channel *c,
+int nvgpu_channel_alloc_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job **job_out)
 {
 	int err = 0;
@@ -2094,7 +2094,7 @@ void nvgpu_channel_clean_up_jobs(struct nvgpu_channel *c,
 
 	/*
 	 * ensure all pending writes complete before freeing up the job.
-	 * see corresponding nvgpu_smp_rmb in nvgpu_gk20a_alloc_job().
+	 * see corresponding nvgpu_smp_rmb in nvgpu_channel_alloc_job().
 	 */
 	nvgpu_smp_wmb();
 
@@ -2152,9 +2152,9 @@ void nvgpu_channel_update(struct nvgpu_channel *c)
  *
  * Takes write access on g->deterministic_busy.
  *
- * Must be paired with gk20a_channel_deterministic_unidle().
+ * Must be paired with nvgpu_channel_deterministic_unidle().
  */
-void gk20a_channel_deterministic_idle(struct gk20a *g)
+void nvgpu_channel_deterministic_idle(struct gk20a *g)
 {
 	struct nvgpu_fifo *f = &g->fifo;
 	u32 chid;
@@ -2195,7 +2195,7 @@ void gk20a_channel_deterministic_idle(struct gk20a *g)
  *
  * This releases write access on g->deterministic_busy.
  */
-void gk20a_channel_deterministic_unidle(struct gk20a *g)
+void nvgpu_channel_deterministic_unidle(struct gk20a *g)
 {
	struct nvgpu_fifo *f = &g->fifo;
 	u32 chid;
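Aside on the barrier pairing renamed in the @@ -2094 hunk above: nvgpu_channel_clean_up_jobs() issues nvgpu_smp_wmb() after its last writes to a job, and nvgpu_channel_alloc_job() issues the matching nvgpu_smp_rmb() before reusing one. Below is a minimal standalone C11 sketch of that publish/consume pattern; struct job, slot_free, job_release and job_acquire are illustrative names, not nvgpu identifiers.

/*
 * C11 fence-based sketch of the nvgpu_smp_wmb()/nvgpu_smp_rmb() pairing.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct job {
	int payload;
};

static struct job slot;
static atomic_bool slot_free = true;

/* recycler side, as in nvgpu_channel_clean_up_jobs(): finish all pending
 * writes to the job before publishing it as free for reuse */
void job_release(void)
{
	slot.payload = 0;				/* pending writes */
	atomic_thread_fence(memory_order_release);	/* ~ nvgpu_smp_wmb() */
	atomic_store_explicit(&slot_free, true, memory_order_relaxed);
}

/* allocator side, as in nvgpu_channel_alloc_job(): observe the job as
 * free before touching its fields */
bool job_acquire(void)
{
	if (!atomic_load_explicit(&slot_free, memory_order_relaxed))
		return false;
	atomic_thread_fence(memory_order_acquire);	/* ~ nvgpu_smp_rmb() */
	atomic_store_explicit(&slot_free, false, memory_order_relaxed);
	slot.payload = 42;				/* safe to reuse now */
	return true;
}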
@@ -525,7 +525,7 @@ static int nvgpu_submit_channel_gpfifo(struct nvgpu_channel *c,
 	}
 
 	if (need_job_tracking) {
-		err = nvgpu_gk20a_alloc_job(c, &job);
+		err = nvgpu_channel_alloc_job(c, &job);
 		if (err != 0) {
 			goto clean_up;
 		}
@@ -441,8 +441,8 @@ int nvgpu_channel_disable_tsg(struct gk20a *g, struct nvgpu_channel *ch);
 int nvgpu_channel_suspend_all_serviceable_ch(struct gk20a *g);
 void nvgpu_channel_resume_all_serviceable_ch(struct gk20a *g);
 
-void gk20a_channel_deterministic_idle(struct gk20a *g);
-void gk20a_channel_deterministic_unidle(struct gk20a *g);
+void nvgpu_channel_deterministic_idle(struct gk20a *g);
+void nvgpu_channel_deterministic_unidle(struct gk20a *g);
 
 int nvgpu_channel_worker_init(struct gk20a *g);
 void nvgpu_channel_worker_deinit(struct gk20a *g);
@@ -483,10 +483,10 @@ bool nvgpu_channel_joblist_is_empty(struct nvgpu_channel *c);
 
 int nvgpu_channel_update_runlist(struct nvgpu_channel *c, bool add);
 
-void gk20a_wait_until_counter_is_N(
+void nvgpu_channel_wait_until_counter_is_N(
 		struct nvgpu_channel *ch, nvgpu_atomic_t *counter, int wait_value,
 		struct nvgpu_cond *c, const char *caller, const char *counter_name);
-int nvgpu_gk20a_alloc_job(struct nvgpu_channel *c,
+int nvgpu_channel_alloc_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job **job_out);
 void nvgpu_channel_free_job(struct nvgpu_channel *c,
 		struct nvgpu_channel_job *job);
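The declarations in the @@ -483 hunk above pair an atomic counter with a struct nvgpu_cond wait queue: callers block until the counter (e.g. ch->ref_count) reaches wait_value, woken via ch->ref_count_dec_wq each time a reference is dropped. A pthread-based sketch of that contract follows; counter_wait, counter_wait_until and counter_put are illustrative names, and the real function also logs periodic warnings while waiting, which this sketch omits.

/*
 * pthread sketch of the wait-until-counter-is-N contract.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct counter_wait {
	atomic_int counter;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

void counter_wait_until(struct counter_wait *cw, int wait_value,
			const char *caller, const char *counter_name)
{
	pthread_mutex_lock(&cw->lock);
	while (atomic_load(&cw->counter) != wait_value)
		pthread_cond_wait(&cw->cond, &cw->lock);
	pthread_mutex_unlock(&cw->lock);
	printf("%s: %s reached %d\n", caller, counter_name, wait_value);
}

/* decrement path (cf. dropping a channel reference): change the counter
 * under the lock, then wake every waiter */
void counter_put(struct counter_wait *cw)
{
	pthread_mutex_lock(&cw->lock);
	atomic_fetch_sub(&cw->counter, 1);
	pthread_cond_broadcast(&cw->cond);
	pthread_mutex_unlock(&cw->lock);
}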
@@ -519,7 +519,7 @@ int gk20a_do_idle_impl(struct gk20a *g, bool force_reset)
 	 * Hold back deterministic submits and changes to deterministic
 	 * channels - this must be outside the power busy locks.
 	 */
-	gk20a_channel_deterministic_idle(g);
+	nvgpu_channel_deterministic_idle(g);
 
 	/* acquire busy lock to block other busy() calls */
 	down_write(&l->busy_lock);
@@ -626,7 +626,7 @@ fail_drop_usage_count:
 fail_timeout:
 	nvgpu_mutex_release(&platform->railgate_lock);
 	up_write(&l->busy_lock);
-	gk20a_channel_deterministic_unidle(g);
+	nvgpu_channel_deterministic_unidle(g);
 	return -EBUSY;
 }
 
@@ -676,7 +676,7 @@ int gk20a_do_unidle_impl(struct gk20a *g)
 	nvgpu_mutex_release(&platform->railgate_lock);
 	up_write(&l->busy_lock);
 
-	gk20a_channel_deterministic_unidle(g);
+	nvgpu_channel_deterministic_unidle(g);
 
 	return 0;
 }
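The idle/unidle pairing above reads as writer-side locking on g->deterministic_busy: the diff's comments say idle takes write access and unidle releases it, holding back deterministic submits (which, by implication, take read access) across railgating in gk20a_do_idle_impl()/gk20a_do_unidle_impl(). A minimal pthread rwlock sketch under that assumption; every name below is illustrative, not an nvgpu identifier.

/*
 * rwlock sketch of the deterministic idle/unidle pairing.
 */
#include <pthread.h>

static pthread_rwlock_t deterministic_busy = PTHREAD_RWLOCK_INITIALIZER;

void channel_deterministic_idle(void)
{
	pthread_rwlock_wrlock(&deterministic_busy);	/* blocks new submits */
}

void channel_deterministic_unidle(void)
{
	pthread_rwlock_unlock(&deterministic_busy);	/* lets them resume */
}

/* submit path: read access held only while the submit is in flight */
void deterministic_submit(void)
{
	pthread_rwlock_rdlock(&deterministic_busy);
	/* ... build and post gpfifo entries ... */
	pthread_rwlock_unlock(&deterministic_busy);
}

/* power path: idle must be paired with unidle, per the diff's comments,
 * including on the failure paths (cf. the fail_timeout label above) */
int do_idle(void)
{
	channel_deterministic_idle();
	/* ... force the GPU idle / railgate ... */
	channel_deterministic_unidle();
	return 0;
}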