gpu: nvgpu: replace wait_queue_head_t with nvgpu_cond

Replace existing usages of wait_queue_head_t with struct nvgpu_cond and
use the corresponding APIs in order to reduce Linux dependencies in NVGPU.

JIRA NVGPU-205

Change-Id: I85850369c3c47d3e1704e4171b1d172361842423
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1575778
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
This commit is contained in:
Debarshi Dutta
2017-10-10 10:45:54 +05:30
committed by mobile promotions
parent 30b9cbe35a
commit 61b263d832
14 changed files with 47 additions and 46 deletions

View File

@@ -40,6 +40,7 @@
#include <nvgpu/kref.h>
#include <nvgpu/log.h>
#include <nvgpu/barrier.h>
#include <nvgpu/cond.h>
#include "gk20a/gk20a.h"
#include "clk/clk_arb.h"
@@ -189,7 +190,7 @@ struct nvgpu_clk_arb {
struct work_struct vf_table_fn_work;
struct workqueue_struct *vf_table_work_queue;
wait_queue_head_t request_wq;
struct nvgpu_cond request_wq;
struct nvgpu_clk_vf_table *current_vf_table;
struct nvgpu_clk_vf_table vf_table_pool[2];
@@ -218,7 +219,7 @@ struct nvgpu_clk_dev {
struct list_head link;
struct llist_node node;
};
wait_queue_head_t readout_wq;
struct nvgpu_cond readout_wq;
nvgpu_atomic_t poll_mask;
u16 gpc2clk_target_mhz;
u16 mclk_target_mhz;
@@ -371,7 +372,7 @@ int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
INIT_LIST_HEAD_RCU(&arb->sessions);
init_llist_head(&arb->requests);
init_waitqueue_head(&arb->request_wq);
nvgpu_cond_init(&arb->request_wq);
arb->vf_table_work_queue = alloc_workqueue("%s", WQ_HIGHPRI, 1,
"vf_table_update");
arb->update_work_queue = alloc_workqueue("%s", WQ_HIGHPRI, 1,
@@ -400,8 +401,8 @@ int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
do {
/* Check that first run is completed */
nvgpu_smp_mb();
wait_event_interruptible(arb->request_wq,
nvgpu_atomic_read(&arb->req_nr));
NVGPU_COND_WAIT_INTERRUPTIBLE(&arb->request_wq,
nvgpu_atomic_read(&arb->req_nr), 0);
} while (!nvgpu_atomic_read(&arb->req_nr));
@@ -547,7 +548,7 @@ static int nvgpu_clk_arb_install_fd(struct gk20a *g,
fd_install(fd, file);
init_waitqueue_head(&dev->readout_wq);
nvgpu_cond_init(&dev->readout_wq);
nvgpu_atomic_set(&dev->poll_mask, 0);
@@ -1269,7 +1270,7 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
/* VF Update complete */
nvgpu_clk_arb_set_global_alarm(g, EVENT(VF_UPDATE));
wake_up_interruptible(&arb->request_wq);
nvgpu_cond_signal_interruptible(&arb->request_wq);
#ifdef CONFIG_DEBUG_FS
g->ops.bus.read_ptimer(g, &t1);
@@ -1317,7 +1318,7 @@ exit_arb:
head = llist_del_all(&arb->requests);
llist_for_each_entry_safe(dev, tmp, head, node) {
nvgpu_atomic_set(&dev->poll_mask, POLLIN | POLLRDNORM);
wake_up_interruptible(&dev->readout_wq);
nvgpu_cond_signal_interruptible(&dev->readout_wq);
nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
}
@@ -1444,7 +1445,7 @@ static u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
if (poll_mask) {
nvgpu_atomic_set(&dev->poll_mask, poll_mask);
wake_up_interruptible_all(&dev->readout_wq);
nvgpu_cond_broadcast_interruptible(&dev->readout_wq);
}
return new_alarms_reported;
@@ -1587,8 +1588,8 @@ static ssize_t nvgpu_clk_arb_read_event_dev(struct file *filp, char __user *buf,
while (!__pending_event(dev, &info)) {
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
err = wait_event_interruptible(dev->readout_wq,
__pending_event(dev, &info));
err = NVGPU_COND_WAIT_INTERRUPTIBLE(&dev->readout_wq,
__pending_event(dev, &info), 0);
if (err)
return err;
if (info.timestamp)
@@ -1607,7 +1608,7 @@ static unsigned int nvgpu_clk_arb_poll_dev(struct file *filp, poll_table *wait)
gk20a_dbg_fn("");
poll_wait(filp, &dev->readout_wq, wait);
poll_wait(filp, &dev->readout_wq.wq, wait);
return nvgpu_atomic_xchg(&dev->poll_mask, 0);
}

View File

@@ -39,8 +39,8 @@ static void nvgpu_init_vars(struct gk20a *g)
struct device *dev = dev_from_gk20a(g);
struct gk20a_platform *platform = dev_get_drvdata(dev);
init_waitqueue_head(&l->sw_irq_stall_last_handled_wq);
init_waitqueue_head(&l->sw_irq_nonstall_last_handled_wq);
nvgpu_cond_init(&l->sw_irq_stall_last_handled_wq);
nvgpu_cond_init(&l->sw_irq_nonstall_last_handled_wq);
gk20a_init_gr(g);
init_rwsem(&l->busy_lock);
@@ -261,14 +261,14 @@ void nvgpu_wait_for_deferred_interrupts(struct gk20a *g)
int nonstall_irq_threshold = atomic_read(&l->hw_irq_nonstall_count);
/* wait until all stalling irqs are handled */
wait_event(l->sw_irq_stall_last_handled_wq,
NVGPU_COND_WAIT(&l->sw_irq_stall_last_handled_wq,
cyclic_delta(stall_irq_threshold,
atomic_read(&l->sw_irq_stall_last_handled))
<= 0);
<= 0, 0);
/* wait until all non-stalling irqs are handled */
wait_event(l->sw_irq_nonstall_last_handled_wq,
NVGPU_COND_WAIT(&l->sw_irq_nonstall_last_handled_wq,
cyclic_delta(nonstall_irq_threshold,
atomic_read(&l->sw_irq_nonstall_last_handled))
<= 0);
<= 0, 0);
}

View File

@@ -59,7 +59,7 @@ irqreturn_t nvgpu_intr_thread_stall(struct gk20a *g)
/* sync handled irq counter before re-enabling interrupts */
atomic_set(&l->sw_irq_stall_last_handled, hw_irq_count);
wake_up_all(&l->sw_irq_stall_last_handled_wq);
nvgpu_cond_broadcast(&l->sw_irq_stall_last_handled_wq);
trace_mc_gk20a_intr_thread_stall_done(g->name);
@@ -128,7 +128,7 @@ irqreturn_t nvgpu_intr_nonstall(struct gk20a *g)
g->ops.mc.intr_nonstall_resume(g);
wake_up_all(&l->sw_irq_nonstall_last_handled_wq);
nvgpu_cond_broadcast(&l->sw_irq_nonstall_last_handled_wq);
return IRQ_HANDLED;
}

View File

@@ -575,7 +575,7 @@ static unsigned int gk20a_event_id_poll(struct file *filep, poll_table *wait)
gk20a_dbg(gpu_dbg_fn | gpu_dbg_info, "");
poll_wait(filep, &event_id_data->event_id_wq, wait);
poll_wait(filep, &event_id_data->event_id_wq.wq, wait);
nvgpu_mutex_acquire(&event_id_data->lock);
@@ -683,7 +683,7 @@ void gk20a_channel_event_id_post_event(struct channel_gk20a *ch,
event_id, ch->chid);
event_id_data->event_posted = true;
wake_up_interruptible_all(&event_id_data->event_id_wq);
nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);
nvgpu_mutex_release(&event_id_data->lock);
}
@@ -735,7 +735,7 @@ static int gk20a_channel_event_id_enable(struct channel_gk20a *ch,
event_id_data->is_tsg = false;
event_id_data->event_id = event_id;
init_waitqueue_head(&event_id_data->event_id_wq);
nvgpu_cond_init(&event_id_data->event_id_wq);
err = nvgpu_mutex_init(&event_id_data->lock);
if (err)
goto clean_up_free;

View File

@@ -97,7 +97,7 @@ void gk20a_tsg_event_id_post_event(struct tsg_gk20a *tsg,
event_id, tsg->tsgid);
event_id_data->event_posted = true;
wake_up_interruptible_all(&event_id_data->event_id_wq);
nvgpu_cond_broadcast_interruptible(&event_id_data->event_id_wq);
nvgpu_mutex_release(&event_id_data->lock);
}
@@ -150,7 +150,7 @@ static int gk20a_tsg_event_id_enable(struct tsg_gk20a *tsg,
event_id_data->is_tsg = true;
event_id_data->event_id = event_id;
init_waitqueue_head(&event_id_data->event_id_wq);
nvgpu_cond_init(&event_id_data->event_id_wq);
err = nvgpu_mutex_init(&event_id_data->lock);
if (err)
goto clean_up_free;

View File

@@ -77,12 +77,12 @@ struct nvgpu_os_linux {
atomic_t hw_irq_stall_count;
atomic_t hw_irq_nonstall_count;
wait_queue_head_t sw_irq_stall_last_handled_wq;
struct nvgpu_cond sw_irq_stall_last_handled_wq;
atomic_t sw_irq_stall_last_handled;
atomic_t nonstall_ops;
wait_queue_head_t sw_irq_nonstall_last_handled_wq;
struct nvgpu_cond sw_irq_nonstall_last_handled_wq;
atomic_t sw_irq_nonstall_last_handled;
struct work_struct nonstall_fn_work;

View File

@@ -126,7 +126,7 @@ struct gk20a_event_id_data {
bool event_posted;
wait_queue_head_t event_id_wq;
struct nvgpu_cond event_id_wq;
struct nvgpu_mutex lock;
struct nvgpu_list_node event_id_node;
};

View File

@@ -53,7 +53,7 @@ struct gk20a_ctxsw_dev {
struct nvgpu_ctxsw_trace_entry *ents;
struct nvgpu_ctxsw_trace_filter filter;
bool write_enabled;
wait_queue_head_t readout_wq;
struct nvgpu_cond readout_wq;
size_t size;
u32 num_ents;
@@ -100,8 +100,8 @@ ssize_t gk20a_ctxsw_dev_read(struct file *filp, char __user *buf, size_t size,
nvgpu_mutex_release(&dev->write_lock);
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
err = wait_event_interruptible(dev->readout_wq,
!ring_is_empty(hdr));
err = NVGPU_COND_WAIT_INTERRUPTIBLE(&dev->readout_wq,
!ring_is_empty(hdr), 0);
if (err)
return err;
nvgpu_mutex_acquire(&dev->write_lock);
@@ -436,7 +436,7 @@ unsigned int gk20a_ctxsw_dev_poll(struct file *filp, poll_table *wait)
gk20a_dbg(gpu_dbg_fn|gpu_dbg_ctxsw, "");
nvgpu_mutex_acquire(&dev->write_lock);
poll_wait(filp, &dev->readout_wq, wait);
poll_wait(filp, &dev->readout_wq.wq, wait);
if (!ring_is_empty(hdr))
mask |= POLLIN | POLLRDNORM;
nvgpu_mutex_release(&dev->write_lock);
@@ -503,7 +503,7 @@ static int gk20a_ctxsw_init_devs(struct gk20a *g)
dev->g = g;
dev->hdr = NULL;
dev->write_enabled = false;
init_waitqueue_head(&dev->readout_wq);
nvgpu_cond_init(&dev->readout_wq);
err = nvgpu_mutex_init(&dev->write_lock);
if (err)
return err;
@@ -683,7 +683,7 @@ void gk20a_ctxsw_trace_wake_up(struct gk20a *g, int vmid)
return;
dev = &g->ctxsw_trace->devs[vmid];
wake_up_interruptible(&dev->readout_wq);
nvgpu_cond_signal_interruptible(&dev->readout_wq);
}
void gk20a_ctxsw_trace_channel_reset(struct gk20a *g, struct channel_gk20a *ch)

View File

@@ -158,7 +158,7 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
dbg_session->is_pg_disabled = false;
dbg_session->is_timeout_disabled = false;
init_waitqueue_head(&dbg_session->dbg_events.wait_queue);
nvgpu_cond_init(&dbg_session->dbg_events.wait_queue);
nvgpu_init_list_node(&dbg_session->ch_list);
err = nvgpu_mutex_init(&dbg_session->ch_list_lock);
if (err)
@@ -286,7 +286,7 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
poll_wait(filep, &dbg_s->dbg_events.wait_queue, wait);
poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);
gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
@@ -337,7 +337,7 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
dbg_s->dbg_events.num_pending_events++;
wake_up_interruptible_all(&dbg_s->dbg_events.wait_queue);
nvgpu_cond_broadcast_interruptible(&dbg_s->dbg_events.wait_queue);
}
}

View File

@@ -41,7 +41,7 @@ struct channel_gk20a *
nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s);
struct dbg_gpu_session_events {
wait_queue_head_t wait_queue;
struct nvgpu_cond wait_queue;
bool events_enabled;
int num_pending_events;
};

View File

@@ -58,8 +58,8 @@ ssize_t gk20a_sched_dev_read(struct file *filp, char __user *buf,
nvgpu_mutex_release(&sched->status_lock);
if (filp->f_flags & O_NONBLOCK)
return -EAGAIN;
err = wait_event_interruptible(sched->readout_wq,
sched->status);
err = NVGPU_COND_WAIT_INTERRUPTIBLE(&sched->readout_wq,
sched->status, 0);
if (err)
return err;
nvgpu_mutex_acquire(&sched->status_lock);
@@ -88,7 +88,7 @@ unsigned int gk20a_sched_dev_poll(struct file *filp, poll_table *wait)
gk20a_dbg(gpu_dbg_fn | gpu_dbg_sched, "");
nvgpu_mutex_acquire(&sched->status_lock);
poll_wait(filp, &sched->readout_wq, wait);
poll_wait(filp, &sched->readout_wq.wq, wait);
if (sched->status)
mask |= POLLIN | POLLRDNORM;
nvgpu_mutex_release(&sched->status_lock);
@@ -552,7 +552,7 @@ void gk20a_sched_ctrl_tsg_added(struct gk20a *g, struct tsg_gk20a *tsg)
NVGPU_SCHED_SET(tsg->tsgid, sched->recent_tsg_bitmap);
sched->status |= NVGPU_SCHED_STATUS_TSG_OPEN;
nvgpu_mutex_release(&sched->status_lock);
wake_up_interruptible(&sched->readout_wq);
nvgpu_cond_signal_interruptible(&sched->readout_wq);
}
void gk20a_sched_ctrl_tsg_removed(struct gk20a *g, struct tsg_gk20a *tsg)
@@ -609,7 +609,7 @@ int gk20a_sched_ctrl_init(struct gk20a *g)
goto free_recent;
}
init_waitqueue_head(&sched->readout_wq);
nvgpu_cond_init(&sched->readout_wq);
err = nvgpu_mutex_init(&sched->status_lock);
if (err)

View File

@@ -44,7 +44,7 @@ struct gk20a_sched_ctrl {
u64 *recent_tsg_bitmap;
u64 *ref_tsg_bitmap;
wait_queue_head_t readout_wq;
struct nvgpu_cond readout_wq;
};
int gk20a_sched_dev_release(struct inode *inode, struct file *filp);

View File

@@ -357,7 +357,7 @@ static int pstate_sw_setup(struct gk20a *g)
gk20a_dbg_fn("");
init_waitqueue_head(&g->perf_pmu.pstatesobjs.pstate_notifier_wq);
nvgpu_cond_init(&g->perf_pmu.pstatesobjs.pstate_notifier_wq);
err = nvgpu_mutex_init(&g->perf_pmu.pstatesobjs.pstate_mutex);
if (err)

View File

@@ -58,7 +58,7 @@ struct pstate {
struct pstates {
struct boardobjgrp_e32 super;
u32 num_levels;
wait_queue_head_t pstate_notifier_wq;
struct nvgpu_cond pstate_notifier_wq;
u32 is_pstate_switch_on;
struct nvgpu_mutex pstate_mutex; /* protect is_pstate_switch_on */
};