mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
gpu: nvgpu: use nvgpu list for channel worker item

Use nvgpu list APIs instead of linux list APIs to store channel worker items.

Jira NVGPU-13

Change-Id: I01d214810ca2495bd0a644dd1a2816ab8e526981
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1460575
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit a6adaaab7a
parent f474a9e0e4
committed by mobile promotions
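The diff below replaces Linux struct list_head usage with the driver's own nvgpu list abstraction. For orientation, here is a minimal sketch of the primitives involved (nvgpu_init_list_node, nvgpu_list_empty, nvgpu_list_add_tail, nvgpu_list_del). This is an illustrative approximation, not the contents of the real nvgpu list header; in particular, the assumption that nvgpu_list_del() re-initializes the removed node, as list_del_init() did, is spelled out because the enqueue path below depends on it.

    /*
     * Illustrative approximation of the nvgpu intrusive list used in this
     * commit -- not the real nvgpu list header, just enough to read the diff.
     */
    #include <stdbool.h>	/* for the sketch only; kernel code uses its own types */

    struct nvgpu_list_node {
    	struct nvgpu_list_node *prev;
    	struct nvgpu_list_node *next;
    };

    static inline void nvgpu_init_list_node(struct nvgpu_list_node *node)
    {
    	/* An unlinked node points at itself and therefore reads as empty. */
    	node->prev = node;
    	node->next = node;
    }

    static inline bool nvgpu_list_empty(struct nvgpu_list_node *head)
    {
    	return head->next == head;
    }

    static inline void nvgpu_list_add_tail(struct nvgpu_list_node *node,
    				       struct nvgpu_list_node *head)
    {
    	node->prev = head->prev;
    	node->next = head;
    	head->prev->next = node;
    	head->prev = node;
    }

    static inline void nvgpu_list_del(struct nvgpu_list_node *node)
    {
    	node->prev->next = node->next;
    	node->next->prev = node->prev;
    	/* Assumption: the node is left self-linked, like list_del_init(). */
    	nvgpu_init_list_node(node);
    }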
@@ -1736,7 +1736,7 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get)
 	gk20a_dbg_fn("");
 
 	while (__gk20a_channel_worker_pending(g, *get)) {
-		struct channel_gk20a *ch;
+		struct channel_gk20a *ch = NULL;
 
 		/*
 		 * If a channel is on the list, it's guaranteed to be handled

@@ -1751,11 +1751,12 @@ static void gk20a_channel_worker_process(struct gk20a *g, int *get)
 		 * enqueue are harmless.
 		 */
 		nvgpu_spinlock_acquire(&g->channel_worker.items_lock);
-		ch = list_first_entry_or_null(&g->channel_worker.items,
-				struct channel_gk20a,
-				worker_item);
-		if (ch)
-			list_del_init(&ch->worker_item);
+		if (!nvgpu_list_empty(&g->channel_worker.items)) {
+			ch = nvgpu_list_first_entry(&g->channel_worker.items,
+					channel_gk20a,
+					worker_item);
+			nvgpu_list_del(&ch->worker_item);
+		}
 		nvgpu_spinlock_release(&g->channel_worker.items_lock);
 
 		if (!ch) {

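One behavioural detail worth noting in the hunk above: list_first_entry_or_null() returned NULL on an empty list, while the nvgpu call used here has no or-null form, so the code now checks nvgpu_list_empty() explicitly and relies on ch starting out as NULL (the change at line 1736). Condensed into a standalone helper, the new dequeue logic looks like this; channel_worker_pop is a hypothetical name used only for illustration, the driver keeps the logic inline.

    /*
     * Hypothetical helper (illustration only): pop the first queued channel,
     * or return NULL when the worker list is empty.
     */
    static struct channel_gk20a *channel_worker_pop(struct gk20a *g)
    {
    	struct channel_gk20a *ch = NULL;

    	nvgpu_spinlock_acquire(&g->channel_worker.items_lock);
    	if (!nvgpu_list_empty(&g->channel_worker.items)) {
    		ch = nvgpu_list_first_entry(&g->channel_worker.items,
    					    channel_gk20a, worker_item);
    		nvgpu_list_del(&ch->worker_item);
    	}
    	nvgpu_spinlock_release(&g->channel_worker.items_lock);

    	return ch;
    }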
@@ -1818,7 +1819,7 @@ int nvgpu_channel_worker_init(struct gk20a *g)
 
 	atomic_set(&g->channel_worker.put, 0);
 	init_waitqueue_head(&g->channel_worker.wq);
-	INIT_LIST_HEAD(&g->channel_worker.items);
+	nvgpu_init_list_node(&g->channel_worker.items);
 	nvgpu_spinlock_init(&g->channel_worker.items_lock);
 	task = kthread_run(gk20a_channel_poll_worker, g,
 			"nvgpu_channel_poll_%s", g->name);

@@ -1861,7 +1862,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 	}
 
 	nvgpu_spinlock_acquire(&g->channel_worker.items_lock);
-	if (!list_empty(&ch->worker_item)) {
+	if (!nvgpu_list_empty(&ch->worker_item)) {
 		/*
 		 * Already queued, so will get processed eventually.
 		 * The worker is probably awake already.

@@ -1870,7 +1871,7 @@ void gk20a_channel_worker_enqueue(struct channel_gk20a *ch)
 		gk20a_channel_put(ch);
 		return;
 	}
-	list_add_tail(&ch->worker_item, &g->channel_worker.items);
+	nvgpu_list_add_tail(&ch->worker_item, &g->channel_worker.items);
 	nvgpu_spinlock_release(&g->channel_worker.items_lock);
 
 	__gk20a_channel_worker_wakeup(g);

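The enqueue path above treats the channel's own worker_item node as a queued/not-queued flag: it reads as non-empty only while the channel is sitting on the worker list. A sketch of that lifecycle, under the same assumption as before that nvgpu_list_del() re-initializes the removed node the way list_del_init() did:

    /* Lifecycle of ch->worker_item as a queued/not-queued flag (sketch). */
    nvgpu_init_list_node(&ch->worker_item);	/* channel init: self-linked, reads empty */

    nvgpu_list_add_tail(&ch->worker_item, &g->channel_worker.items);
    /* queued: nvgpu_list_empty(&ch->worker_item) is now false, so a second
     * gk20a_channel_worker_enqueue() call just drops its channel reference */

    nvgpu_list_del(&ch->worker_item);		/* worker pops it: reads empty again,
    						 * so the channel can be re-queued */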
@@ -2646,7 +2647,7 @@ int gk20a_init_channel_support(struct gk20a *g, u32 chid)
 	INIT_LIST_HEAD(&c->joblist.dynamic.jobs);
 	nvgpu_init_list_node(&c->dbg_s_list);
 	nvgpu_init_list_node(&c->event_id_list);
-	INIT_LIST_HEAD(&c->worker_item);
+	nvgpu_init_list_node(&c->worker_item);
 
 	err = nvgpu_mutex_init(&c->ioctl_lock);
 	if (err)

@@ -217,7 +217,7 @@ struct channel_gk20a {
 
 	struct channel_gk20a_timeout timeout;
 	/* for job cleanup handling in the background worker */
-	struct list_head worker_item;
+	struct nvgpu_list_node worker_item;
 
 #if defined(CONFIG_GK20A_CYCLE_STATS)
 	struct {

@@ -284,6 +284,13 @@ channel_gk20a_from_ch_entry(struct nvgpu_list_node *node)
 		((uintptr_t)node - offsetof(struct channel_gk20a, ch_entry));
 };
 
+static inline struct channel_gk20a *
+channel_gk20a_from_worker_item(struct nvgpu_list_node *node)
+{
+	return (struct channel_gk20a *)
+		((uintptr_t)node - offsetof(struct channel_gk20a, worker_item));
+};
+
 static inline bool gk20a_channel_as_bound(struct channel_gk20a *ch)
 {
 	return !!ch->vm;

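The helper added above mirrors channel_gk20a_from_ch_entry() right before it: it recovers the containing channel_gk20a from the embedded list node with offsetof(), the same idea as the kernel's container_of(). Its name is not arbitrary: the nvgpu list-entry macro is presumably built by token pasting, which is why the dequeue hunk at line 1751 passes channel_gk20a and worker_item as separate arguments, and why a helper with exactly this name has to exist. Presumed shape of those macros (an assumption for illustration, not a quote of the real header):

    /* Presumed shape of the nvgpu list-entry macros (assumption, not the
     * actual header). */
    #define nvgpu_list_entry(ptr, type, member) \
    	type ## _from_ ## member(ptr)

    #define nvgpu_list_first_entry(ptr, type, member) \
    	nvgpu_list_entry((ptr)->next, type, member)

    /* With these, the dequeue in gk20a_channel_worker_process() expands
     * roughly to:
     *	ch = channel_gk20a_from_worker_item(g->channel_worker.items.next);
     */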
@@ -1107,7 +1107,7 @@ struct gk20a {
 		struct task_struct *poll_task;
 		atomic_t put;
 		wait_queue_head_t wq;
-		struct list_head items;
+		struct nvgpu_list_node items;
 		struct nvgpu_spinlock items_lock;
 	} channel_worker;
 