/linux/fs/

eventfd.c
     32  wait_queue_head_t wqh;   member
     80  if (waitqueue_active(&ctx->wqh))   in eventfd_signal()
    131  poll_wait(file, &ctx->wqh, wait);   in eventfd_poll()
    230  spin_lock_irq(&ctx->wqh.lock);   in eventfd_read()
    250  spin_lock_irq(&ctx->wqh.lock);   in eventfd_read()
    256  if (waitqueue_active(&ctx->wqh))   in eventfd_read()
    258  spin_unlock_irq(&ctx->wqh.lock);   in eventfd_read()
    279  spin_lock_irq(&ctx->wqh.lock);   in eventfd_write()
    297  spin_lock_irq(&ctx->wqh.lock);   in eventfd_write()
    317  spin_lock_irq(&ctx->wqh.lock);   in eventfd_show_fdinfo()
    [all …]
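The eventfd hits above illustrate a recurring trick: the object's counter is protected by the wait queue head's own spinlock (ctx->wqh.lock), so the wakeup can use the *_locked variants while that lock is held. A minimal sketch of the same shape, assuming the wait queue head was set up with init_waitqueue_head(); struct demo_ctx and demo_signal() are hypothetical names, not anything from eventfd.c itself:

    #include <linux/types.h>
    #include <linux/wait.h>
    #include <linux/poll.h>
    #include <linux/spinlock.h>

    /* Hypothetical context in the eventfd shape: the counter shares the
     * wait queue head's spinlock rather than adding a second lock. */
    struct demo_ctx {
            wait_queue_head_t wqh;
            u64 count;
    };

    static void demo_signal(struct demo_ctx *ctx, u64 n)
    {
            unsigned long flags;

            spin_lock_irqsave(&ctx->wqh.lock, flags);
            ctx->count += n;
            /* Skip the wakeup entirely when nobody is waiting. */
            if (waitqueue_active(&ctx->wqh))
                    wake_up_locked_poll(&ctx->wqh, EPOLLIN);
            spin_unlock_irqrestore(&ctx->wqh.lock, flags);
    }
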
timerfd.c
     38  wait_queue_head_t wqh;   member
    255  poll_wait(file, &ctx->wqh, wait);   in timerfd_poll()
    274  spin_lock_irq(&ctx->wqh.lock);   in timerfd_read()
    314  spin_unlock_irq(&ctx->wqh.lock);   in timerfd_read()
    326  spin_lock_irq(&ctx->wqh.lock);   in timerfd_show()
    329  spin_unlock_irq(&ctx->wqh.lock);   in timerfd_show()
    364  spin_lock_irq(&ctx->wqh.lock);   in timerfd_ioctl()
    432  init_waitqueue_head(&ctx->wqh);   in SYSCALL_DEFINE2()
    483  spin_lock_irq(&ctx->wqh.lock);   in do_timerfd_settime()
    521  spin_unlock_irq(&ctx->wqh.lock);   in do_timerfd_settime()
    [all …]
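timerfd_poll() and eventfd_poll() show the matching ->poll() side: register the caller on ctx->wqh with poll_wait(), then compute the ready mask under wqh.lock. A hedged sketch reusing the hypothetical struct demo_ctx from the eventfd example above (demo_poll() is likewise an invented name):

    #include <linux/fs.h>
    #include <linux/poll.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    /* Hypothetical ->poll() in the timerfd/eventfd shape. */
    static __poll_t demo_poll(struct file *file, poll_table *wait)
    {
            struct demo_ctx *ctx = file->private_data;
            __poll_t events = 0;

            /* Register the caller on ctx->wqh so later wakeups reach it. */
            poll_wait(file, &ctx->wqh, wait);

            spin_lock_irq(&ctx->wqh.lock);
            if (ctx->count)
                    events |= EPOLLIN;
            spin_unlock_irq(&ctx->wqh.lock);

            return events;
    }
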
fs-writeback.c
   1485  wait_queue_head_t *wqh;   in __inode_wait_for_writeback() local
   1487  wqh = bit_waitqueue(&inode->i_state, __I_SYNC);   in __inode_wait_for_writeback()
   1490  __wait_on_bit(wqh, &wq, bit_wait,   in __inode_wait_for_writeback()
   1515  wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);   in inode_sleep_on_writeback() local
   1518  prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);   in inode_sleep_on_writeback()
   1523  finish_wait(wqh, &wait);   in inode_sleep_on_writeback()
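__inode_wait_for_writeback() and inode_sleep_on_writeback() use the shared bit-waitqueue machinery: bit_waitqueue() maps a flag bit to one of the hashed system wait queue heads, and the prepare_to_wait()/finish_wait() pair open-codes a single sleep on it. A minimal sketch under those assumptions; DEMO_BUSY and demo_sleep_on_flag() are hypothetical names:

    #include <linux/bitops.h>
    #include <linux/sched.h>
    #include <linux/wait.h>
    #include <linux/wait_bit.h>

    /* Sleep once until another path clears DEMO_BUSY in *flags and wakes
     * the bit's hashed wait queue (e.g. via wake_up_bit()). */
    static void demo_sleep_on_flag(unsigned long *flags)
    {
            wait_queue_head_t *wqh = bit_waitqueue(flags, DEMO_BUSY);
            DEFINE_WAIT(wait);

            prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
            if (test_bit(DEMO_BUSY, flags))
                    schedule();
            finish_wait(wqh, &wait);
    }
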
userfaultfd.c
    911  wait_queue_head_t *wqh)   in find_userfault_in() argument
    916  lockdep_assert_held(&wqh->lock);   in find_userfault_in()
    919  if (!waitqueue_active(wqh))   in find_userfault_in()
    922  wq = list_last_entry(&wqh->head, typeof(*wq), entry);   in find_userfault_in()
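find_userfault_in() shows the locking contract for walking a wait queue by hand: the caller holds wqh->lock, lockdep_assert_held() documents and enforces that, and the queued entries hang off wqh->head. A hedged sketch along the same lines (demo_peek_oldest_waiter() is a hypothetical name):

    #include <linux/list.h>
    #include <linux/lockdep.h>
    #include <linux/wait.h>

    /* Caller must hold wqh->lock; returns the entry at the tail of the
     * queue, or NULL if nothing is queued.  Assuming waiters are added at
     * the head (as userfaultfd does), the tail is the oldest waiter. */
    static struct wait_queue_entry *demo_peek_oldest_waiter(wait_queue_head_t *wqh)
    {
            lockdep_assert_held(&wqh->lock);

            if (!waitqueue_active(wqh))
                    return NULL;

            return list_last_entry(&wqh->head, struct wait_queue_entry, entry);
    }
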
/linux/kernel/locking/

rtmutex.c
    455  WARN_ON_ONCE(wqh->rtlock_task);   in rt_mutex_wake_q_add_task()
    457  wqh->rtlock_task = task;   in rt_mutex_wake_q_add_task()
    459  wake_q_add(&wqh->head, task);   in rt_mutex_wake_q_add_task()
    473  put_task_struct(wqh->rtlock_task);   in rt_mutex_wake_up_q()
    474  wqh->rtlock_task = NULL;   in rt_mutex_wake_up_q()
    477  if (!wake_q_empty(&wqh->head))   in rt_mutex_wake_up_q()
    478  wake_up_q(&wqh->head);   in rt_mutex_wake_up_q()
   1229  rt_mutex_wake_q_add(wqh, waiter);   in mark_wakeup_next_waiter()
   1288  DEFINE_RT_WAKE_Q(wqh);   in rt_mutex_slowunlock()
   1341  mark_wakeup_next_waiter(&wqh, lock);   in rt_mutex_slowunlock()
    [all …]
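Here wqh is not a wait_queue_head_t at all but an rt_wake_q_head, a wrapper around the generic wake_q batching API visible in the hits (wake_q_add(&wqh->head, task), wake_up_q(&wqh->head)): the wakeup is queued while the rtmutex's wait_lock is held and only issued after it is dropped. A minimal sketch of that generic deferred-wakeup pattern, with demo_unlock_and_wake() as a hypothetical name:

    #include <linux/sched.h>
    #include <linux/sched/wake_q.h>
    #include <linux/spinlock.h>

    static void demo_unlock_and_wake(spinlock_t *lock, struct task_struct *next)
    {
            DEFINE_WAKE_Q(wake_q);

            spin_lock(lock);
            /* ... pick the next waiter while the lock is held ... */
            wake_q_add(&wake_q, next);
            spin_unlock(lock);

            /* Issue the wakeup outside the critical section. */
            wake_up_q(&wake_q);
    }
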
rtmutex_api.c
    144  struct rt_wake_q_head *wqh)   in __rt_mutex_futex_unlock() argument
    161  mark_wakeup_next_waiter(wqh, lock);   in __rt_mutex_futex_unlock()
    168  DEFINE_RT_WAKE_Q(wqh);   in rt_mutex_futex_unlock()
    173  postunlock = __rt_mutex_futex_unlock(lock, &wqh);   in rt_mutex_futex_unlock()
    177  rt_mutex_postunlock(&wqh);   in rt_mutex_futex_unlock()
    457  void __sched rt_mutex_postunlock(struct rt_wake_q_head *wqh)   in rt_mutex_postunlock() argument
    459  rt_mutex_wake_up_q(wqh);   in rt_mutex_postunlock()
rwbase_rt.c
    150  DEFINE_RT_WAKE_Q(wqh);   in __rwbase_read_unlock()
    161  rt_mutex_wake_q_add_task(&wqh, owner, state);   in __rwbase_read_unlock()
    166  rt_mutex_wake_up_q(&wqh);   in __rwbase_read_unlock()
rtmutex_common.h
     84  struct rt_wake_q_head *wqh);
     86  extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);
/linux/mm/

internal.h
     57  wait_queue_head_t *wqh;   in wake_throttle_isolated() local
     59  wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];   in wake_throttle_isolated()
     60  if (waitqueue_active(wqh))   in wake_throttle_isolated()
     61  wake_up(wqh);   in wake_throttle_isolated()
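wake_throttle_isolated() checks waitqueue_active() without taking wqh->lock. For that style of use, the comment above waitqueue_active() in <linux/wait.h> requires the waker to order its condition store against the check; the sleeper gets its half of the ordering from set_current_state() inside prepare_to_wait(). A hedged sketch of the documented waker side (demo_wake_waiters() and the bool condition are hypothetical):

    #include <linux/types.h>
    #include <linux/wait.h>

    /* Waker side: publish the condition, then order that store against the
     * waitqueue_active() load before deciding whether to wake anyone. */
    static void demo_wake_waiters(bool *cond, wait_queue_head_t *wqh)
    {
            *cond = true;
            smp_mb();  /* pairs with set_current_state() in prepare_to_wait() */
            if (waitqueue_active(wqh))
                    wake_up(wqh);
    }
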
backing-dev.c
   1017  wait_queue_head_t *wqh = &congestion_wqh[sync];   in clear_bdi_congested() local
   1024  if (waitqueue_active(wqh))   in clear_bdi_congested()
   1025  wake_up(wqh);   in clear_bdi_congested()
   1053  wait_queue_head_t *wqh = &congestion_wqh[sync];   in congestion_wait() local
   1055  prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);   in congestion_wait()
   1057  finish_wait(wqh, &wait);   in congestion_wait()
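congestion_wait(), like reclaim_throttle() below, pairs prepare_to_wait()/finish_wait() with schedule_timeout(): the task sleeps until either a wakeup on wqh or the deadline, whichever comes first. A minimal sketch of that shape; demo_wait_timeout() is a hypothetical name:

    #include <linux/sched.h>
    #include <linux/wait.h>

    /* Returns the remaining jiffies (0 if the full timeout expired). */
    static long demo_wait_timeout(wait_queue_head_t *wqh, long timeout)
    {
            long ret;
            DEFINE_WAIT(wait);

            prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
            ret = schedule_timeout(timeout);
            finish_wait(wqh, &wait);

            return ret;
    }
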
vmscan.c
   1059  wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];   in reclaim_throttle() local
   1112  prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);   in reclaim_throttle()
   1114  finish_wait(wqh, &wait);   in reclaim_throttle()
   3420  wait_queue_head_t *wqh;   in consider_reclaim_throttle() local
   3422  wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS];   in consider_reclaim_throttle()
   3423  if (waitqueue_active(wqh))   in consider_reclaim_throttle()
   3424  wake_up(wqh);   in consider_reclaim_throttle()
memcontrol.c
    167  wait_queue_head_t *wqh;   member
   4649  remove_wait_queue(event->wqh, &event->wait);   in memcg_event_remove()
   4700  wait_queue_head_t *wqh, poll_table *pt)   in memcg_event_ptable_queue_proc() argument
   4705  event->wqh = wqh;   in memcg_event_ptable_queue_proc()
   4706  add_wait_queue(wqh, &event->wait);   in memcg_event_ptable_queue_proc()
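memcg_event_ptable_queue_proc(), like the vfio, acrn, vhost and KVM callbacks further down, is the consumer half of the poll_table protocol: vfs_poll() on the watched file calls the queue proc back with that file's wait queue head, and the callback records it and hooks in its own wait_queue_entry whose function then runs on every wakeup. A hedged, self-contained sketch of the round trip; struct demo_watcher and the demo_* functions are hypothetical names:

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/poll.h>
    #include <linux/wait.h>

    struct demo_watcher {
            wait_queue_entry_t wait;        /* our hook into the file's wqh */
            wait_queue_head_t *wqh;         /* where we queued, for removal */
            poll_table pt;
    };

    /* Runs from the waker's context on every wake_up*() of the watched
     * wqh; must not sleep. */
    static int demo_wakeup(wait_queue_entry_t *wait, unsigned mode,
                           int sync, void *key)
    {
            return 0;
    }

    static void demo_ptable_queue_proc(struct file *file,
                                       wait_queue_head_t *wqh, poll_table *pt)
    {
            struct demo_watcher *w = container_of(pt, struct demo_watcher, pt);

            w->wqh = wqh;
            add_wait_queue(wqh, &w->wait);
    }

    static void demo_watch_file(struct demo_watcher *w, struct file *file)
    {
            init_waitqueue_func_entry(&w->wait, demo_wakeup);
            init_poll_funcptr(&w->pt, demo_ptable_queue_proc);
            vfs_poll(file, &w->pt); /* invokes demo_ptable_queue_proc() */
    }
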
/linux/drivers/vfio/

virqfd.c
     82  wait_queue_head_t *wqh, poll_table *pt)   in virqfd_ptable_queue_proc() argument
     85  add_wait_queue(wqh, &virqfd->wait);   in virqfd_ptable_queue_proc()
/linux/drivers/virt/acrn/

irqfd.c
     97  static void hsm_irqfd_poll_func(struct file *file, wait_queue_head_t *wqh,   in hsm_irqfd_poll_func() argument
    103  add_wait_queue(wqh, &irqfd->wait);   in hsm_irqfd_poll_func()
/linux/kernel/

seccomp.c
    226  wait_queue_head_t wqh;   member
    524  if (waitqueue_active(&orig->wqh))   in __seccomp_filter_orphan()
    525  wake_up_poll(&orig->wqh, EPOLLHUP);   in __seccomp_filter_orphan()
    675  init_waitqueue_head(&sfilter->wqh);   in seccomp_prepare_filter()
   1104  wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM);   in seccomp_do_user_notification()
   1461  wake_up_poll(&filter->wqh, EPOLLOUT | EPOLLWRNORM);   in seccomp_notify_recv()
   1703  poll_wait(file, &filter->wqh, poll_tab);   in seccomp_notify_poll()
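The seccomp user-notification hits show one wqh carrying several distinct events, distinguished only by the mask passed to wake_up_poll(): EPOLLIN | EPOLLRDNORM when a request arrives, EPOLLOUT | EPOLLWRNORM when one is consumed, EPOLLHUP when the filter is orphaned. A minimal, hypothetical sketch of that convention (the demo_* names are invented):

    #include <linux/poll.h>
    #include <linux/wait.h>

    static void demo_notify_new_request(wait_queue_head_t *wqh)
    {
            wake_up_poll(wqh, EPOLLIN | EPOLLRDNORM);
    }

    static void demo_notify_gone(wait_queue_head_t *wqh)
    {
            if (waitqueue_active(wqh))
                    wake_up_poll(wqh, EPOLLHUP);
    }
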
/linux/kernel/futex/

pi.c
    618  DEFINE_RT_WAKE_Q(wqh);   in wake_futex_pi()
    671  postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wqh);   in wake_futex_pi()
    678  rt_mutex_postunlock(&wqh);   in wake_futex_pi()
/linux/drivers/vhost/

vhost.c
    154  static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,   in vhost_poll_func() argument
    160  poll->wqh = wqh;   in vhost_poll_func()
    161  add_wait_queue(wqh, &poll->wait);   in vhost_poll_func()
    196  poll->wqh = NULL;   in vhost_poll_init()
    208  if (poll->wqh)   in vhost_poll_start()
    227  if (poll->wqh) {   in vhost_poll_stop()
    228  remove_wait_queue(poll->wqh, &poll->wait);   in vhost_poll_stop()
    229  poll->wqh = NULL;   in vhost_poll_stop()
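vhost_poll_func() and vhost_poll_stop() add the teardown step to the queue-proc pattern: because the callback stored the wait queue head in poll->wqh, the entry can later be detached with remove_wait_queue(). A hedged sketch, reusing the hypothetical struct demo_watcher from the memcontrol.c example above:

    #include <linux/wait.h>

    /* Detach only if the queue proc ever ran and recorded a wqh. */
    static void demo_unwatch(struct demo_watcher *w)
    {
            if (w->wqh) {
                    remove_wait_queue(w->wqh, &w->wait);
                    w->wqh = NULL;
            }
    }
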
vhost.h
     32  wait_queue_head_t *wqh;   member
/linux/Documentation/RCU/

listRCU.rst
    426  spin_lock_irqsave(&ctx->wqh.lock, flags);
    430  wake_up_locked_poll(&ctx->wqh, EPOLLIN);
    432  spin_unlock_irqrestore(&ctx->wqh.lock, flags);
/linux/virt/kvm/

eventfd.c
    239  irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,   in irqfd_ptable_queue_proc() argument
    244  add_wait_queue_priority(wqh, &irqfd->wait);   in irqfd_ptable_queue_proc()