/linux/drivers/gpu/drm/radeon/

radeon_semaphore.c
    50  (*semaphore)->waiters = 0;  in radeon_semaphore_create()
    66  --semaphore->waiters;  in radeon_semaphore_emit_signal()
    83  ++semaphore->waiters;  in radeon_semaphore_emit_wait()
    99  if ((*semaphore)->waiters > 0) {  in radeon_semaphore_free()
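The radeon hits sketch a simple lifetime check: each semaphore counts outstanding waits (emit_wait increments, emit_signal decrements) and the free path complains if any remain. A minimal hedged sketch of that bookkeeping, with hypothetical demo_* names rather than the radeon code::

    #include <linux/printk.h>

    struct demo_semaphore {
            int waiters;    /* waits emitted minus signals emitted */
    };

    static void demo_emit_signal(struct demo_semaphore *sem)
    {
            --sem->waiters;         /* one queued wait will be satisfied */
    }

    static void demo_emit_wait(struct demo_semaphore *sem)
    {
            ++sem->waiters;         /* a consumer now depends on a signal */
    }

    static void demo_free(struct demo_semaphore *sem)
    {
            if (sem->waiters > 0)   /* matches the check in the free path */
                    pr_warn("semaphore freed with pending waiters\n");
    }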

radeon_trace.h
    176  __field(signed, waiters)
    182  __entry->waiters = sem->waiters;
    187  __entry->waiters, __entry->gpu_addr)

/linux/kernel/locking/

percpu-rwsem.c
    21  init_waitqueue_head(&sem->waiters);  in __percpu_init_rwsem()
    144  spin_lock_irq(&sem->waiters.lock);  in percpu_rwsem_wait()
    152  __add_wait_queue_entry_tail(&sem->waiters, &wq_entry);  in percpu_rwsem_wait()
    154  spin_unlock_irq(&sem->waiters.lock);  in percpu_rwsem_wait()
    261  __wake_up(&sem->waiters, TASK_NORMAL, 1, sem);  in percpu_up_write()
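percpu-rwsem funnels all sleepers through one wait_queue_head_t: percpu_rwsem_wait() enqueues a custom entry under the queue's internal spinlock, and percpu_up_write() wakes exactly one entry at a time. The same shape with the ordinary waitqueue helpers (a sketch; the real code open-codes the enqueue so it can install a custom wake function)::

    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_waiters);
    static bool demo_ready;

    static void demo_wait(void)
    {
            /* wait_event() handles enqueue/dequeue on demo_waiters */
            wait_event(demo_waiters, READ_ONCE(demo_ready));
    }

    static void demo_wake_one(void)
    {
            WRITE_ONCE(demo_ready, true);
            /* wake a single waiter, as percpu_up_write() does with
             * __wake_up(&sem->waiters, TASK_NORMAL, 1, sem) */
            wake_up_nr(&demo_waiters, 1);
    }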

rtmutex_common.h
    95  return !RB_EMPTY_ROOT(&lock->waiters.rb_root);  in rt_mutex_has_waiters()
    106  struct rb_node *leftmost = rb_first_cached(&lock->waiters);  in rt_mutex_waiter_is_top_waiter()
    113  struct rb_node *leftmost = rb_first_cached(&lock->waiters);  in rt_mutex_top_waiter()
    161  lock->waiters = RB_ROOT_CACHED;  in __rt_mutex_base_init()
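These helpers lean on rb_root_cached: the empty check and the top-waiter lookup are both O(1) because the leftmost (highest-priority) node is cached alongside the root. The same queries against a hypothetical lock type, using the real rbtree API::

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct demo_lock {
            struct rb_root_cached waiters;  /* priority-sorted waiter tree */
    };

    static void demo_lock_init(struct demo_lock *lock)
    {
            lock->waiters = RB_ROOT_CACHED;
    }

    static bool demo_has_waiters(struct demo_lock *lock)
    {
            /* rb_root_cached embeds a plain rb_root; O(1) empty check */
            return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
    }

    static struct rb_node *demo_top_waiter(struct demo_lock *lock)
    {
            /* cached leftmost node == highest-priority waiter, also O(1) */
            return rb_first_cached(&lock->waiters);
    }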

ww_mutex.h
    96  struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);  in __ww_waiter_first()
    123  struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);  in __ww_waiter_last()

/linux/Documentation/locking/

rt-mutex.rst
    32  The enqueueing of the waiters into the rtmutex waiter tree is done in
    35  priority waiters tree. This tree too queues in priority order. Whenever
    42  without waiters. The optimized fastpath operations require cmpxchg
    50  keep track of the "lock has waiters" state:
    56  NULL 1 lock is free and has waiters and the top waiter
    59  taskpointer 1 lock is held and has waiters [2]_
    71  waiters. This can happen when grabbing the lock in the slow path.
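The table rows quoted above pack the whole state into the owner field: the task pointer plus a low "has waiters" bit, which is what makes the cmpxchg fastpath possible. A sketch of that encoding, assuming the usual pointer alignment (the constant name matches kernel/locking/rtmutex.c; the helpers are illustrative)::

    #include <linux/sched.h>

    #define RT_MUTEX_HAS_WAITERS    1UL

    static struct task_struct *demo_owner(unsigned long owner_field)
    {
            return (struct task_struct *)(owner_field & ~RT_MUTEX_HAS_WAITERS);
    }

    static bool demo_lock_has_waiters(unsigned long owner_field)
    {
            return owner_field & RT_MUTEX_HAS_WAITERS;
    }

    /* The four states from the table: NULL|0 free, no waiters;
     * NULL|1 free, but the top waiter is taking it; task|0 held,
     * no waiters (fastpath unlock via cmpxchg); task|1 held with
     * waiters (slowpath required). */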

rt-mutex-design.rst
    107  place the task in the waiters rbtree of a mutex as well as the
    113  waiters
    199  Every mutex keeps track of all the waiters that are blocked on itself. The
    209  a tree of all top waiters of the mutexes that are owned by the process.
    210  Note that this tree only holds the top waiters and not all waiters that are
    309  flag. It's set whenever there are waiters on a mutex.
    434  the task on the waiters tree of the mutex, and if need be, the pi_waiters
    456  waiters of the lock
    459  owner of the lock, and if the lock still has waiters, the top_waiter
    500  does, then it will take itself off the waiters tree and set itself back
    [all …]

futex-requeue-pi.rst
    7  left without an owner if it has waiters; doing so would break the PI
    48  has waiters. Note that pthread_cond_wait() attempts to lock the
    50  underlying rt_mutex with waiters, and no owner, breaking the
    89  In order to ensure the rt_mutex has an owner if it has waiters, it

/linux/drivers/md/persistent-data/

dm-block-manager.c
    46  struct list_head waiters;  member
    153  list_for_each_entry_safe(w, tmp, &lock->waiters, list) {  in __wake_many()
    179  INIT_LIST_HEAD(&lock->waiters);  in bl_init()
    188  list_empty(&lock->waiters);  in __available_for_read()
    214  list_add_tail(&w.list, &lock->waiters);  in bl_down_read()
    249  if (!list_empty(&lock->waiters))  in bl_up_read()
    266  if (lock->count == 0 && list_empty(&lock->waiters)) {  in bl_down_write()
    281  list_add(&w.list, &lock->waiters);  in bl_down_write()
    295  if (!list_empty(&lock->waiters))  in bl_up_write()
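The pattern in these hits: blocked tasks sit on a plain list, readers appended with list_add_tail() and writers pushed to the front with list_add() so they are woken first, and the wake path uses the _safe iterator because entries are unlinked as it walks. A hedged sketch with hypothetical demo_* names::

    #include <linux/list.h>
    #include <linux/sched.h>

    struct demo_waiter {
            struct list_head list;
            struct task_struct *task;
    };

    static void demo_queue_reader(struct list_head *waiters,
                                  struct demo_waiter *w)
    {
            list_add_tail(&w->list, waiters);       /* readers queue FIFO */
    }

    static void demo_queue_writer(struct list_head *waiters,
                                  struct demo_waiter *w)
    {
            list_add(&w->list, waiters);            /* writers jump the queue */
    }

    static void demo_wake_all(struct list_head *waiters)
    {
            struct demo_waiter *w, *tmp;

            /* _safe variant: each entry is unlinked as its task wakes */
            list_for_each_entry_safe(w, tmp, waiters, list) {
                    list_del_init(&w->list);
                    wake_up_process(w->task);
            }
    }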

/linux/include/linux/

percpu-rwsem.h
    16  wait_queue_head_t waiters;  member
    35  .waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters), \

rtmutex.h
    25  struct rb_root_cached waiters;  member
    32  .waiters = RB_ROOT_CACHED, \

/linux/tools/testing/selftests/futex/include/

futex2test.h
    18  static inline int futex_waitv(volatile struct futex_waitv *waiters, unsigned long nr_waiters,  in futex_waitv() argument
    21  return syscall(__NR_futex_waitv, waiters, nr_waiters, flags, timo, clockid);  in futex_waitv()

/linux/drivers/gpu/host1x/

debug.c
    80  unsigned int waiters = 0;  in show_syncpts() local
    84  waiters++;  in show_syncpts()
    87  if (!min && !max && !waiters)  in show_syncpts()
    92  i, m->syncpt[i].name, min, max, waiters);  in show_syncpts()

/linux/kernel/futex/

futex.h
    51  atomic_t waiters;  member
    192  atomic_inc(&hb->waiters);  in futex_hb_waiters_inc()
    207  atomic_dec(&hb->waiters);  in futex_hb_waiters_dec()
    218  return atomic_read(&hb->waiters);  in futex_hb_waiters_pending()
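Each futex hash bucket keeps an atomic count of queued tasks so the wake path can return early, without taking the bucket lock, when the count reads zero. A simplified sketch of that idea (the real helpers are also conditional on CONFIG_SMP and insert memory barriers this sketch omits)::

    #include <linux/atomic.h>

    struct demo_hash_bucket {
            atomic_t waiters;       /* hint: tasks queued on this bucket */
            /* spinlock + list of queued tasks omitted */
    };

    static void demo_hb_waiters_inc(struct demo_hash_bucket *hb)
    {
            atomic_inc(&hb->waiters);       /* before queueing the task */
    }

    static void demo_hb_waiters_dec(struct demo_hash_bucket *hb)
    {
            atomic_dec(&hb->waiters);       /* after unqueueing */
    }

    static int demo_hb_waiters_pending(struct demo_hash_bucket *hb)
    {
            /* wake path: a zero read lets us skip the bucket lock */
            return atomic_read(&hb->waiters);
    }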

syscalls.c
    262  SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,  in SYSCALL_DEFINE5() argument
    276  if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters)  in SYSCALL_DEFINE5()
    308  ret = futex_parse_waitv(futexv, waiters, nr_futexes);  in SYSCALL_DEFINE5()

/linux/Documentation/userspace-api/

futex2.rst
    25  futex_waitv(struct futex_waitv *waiters, unsigned int nr_futexes,
    39  pointer for the first item of the array is passed as ``waiters``. An invalid
    40  address for ``waiters`` or for any ``uaddr`` returns ``-EFAULT``.
    52  For each entry in ``waiters`` array, the current value at ``uaddr`` is compared
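A minimal userspace sketch of the call documented above, waiting on two 32-bit futexes at once. It assumes kernel and libc headers new enough to provide __NR_futex_waitv and struct futex_waitv (v5.16 or later); with both words unchanged, the call here simply times out after a second::

    #include <linux/futex.h>        /* struct futex_waitv, FUTEX_32 */
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint32_t futex_a, futex_b;

    int main(void)
    {
            struct futex_waitv waiters[2] = {
                    { .val = 0, .uaddr = (uintptr_t)&futex_a, .flags = FUTEX_32 },
                    { .val = 0, .uaddr = (uintptr_t)&futex_b, .flags = FUTEX_32 },
            };
            struct timespec timo;

            /* absolute timeout, one second from now */
            clock_gettime(CLOCK_MONOTONIC, &timo);
            timo.tv_sec += 1;

            /* success returns the array index of a woken entry; a value
             * mismatch at entry fails with EAGAIN, expiry with ETIMEDOUT */
            long ret = syscall(__NR_futex_waitv, waiters, 2, 0,
                               &timo, CLOCK_MONOTONIC);
            printf("futex_waitv returned %ld\n", ret);
            return 0;
    }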

/linux/drivers/greybus/

greybus_trace.h
    102  __field(int, waiters)
    112  __entry->waiters = atomic_read(&operation->waiters);
    118  __entry->active, __entry->waiters, __entry->errno)

operation.c
    90  if (atomic_read(&operation->waiters))  in gb_operation_put_active()
    554  atomic_set(&operation->waiters, 0);  in gb_operation_create_common()
    1082  atomic_inc(&operation->waiters);  in gb_operation_cancel()
    1085  atomic_dec(&operation->waiters);  in gb_operation_cancel()
    1109  atomic_inc(&operation->waiters);  in gb_operation_cancel_incoming()
    1112  atomic_dec(&operation->waiters);  in gb_operation_cancel_incoming()
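The shape these hits suggest: cancellers bump operation->waiters before sleeping until the operation goes idle, and the put-active path only touches the wait queue when that count is non-zero. A generic, hedged reconstruction of the pattern, not the greybus logic itself::

    #include <linux/atomic.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_cancellation_queue);

    struct demo_op {
            atomic_t active;        /* in-flight references */
            atomic_t waiters;       /* cancellers sleeping on 'active' */
    };

    /* canceller: announce interest, then sleep until the op is idle */
    static void demo_cancel_sync(struct demo_op *op)
    {
            atomic_inc(&op->waiters);
            wait_event(demo_cancellation_queue,
                       atomic_read(&op->active) == 0);
            atomic_dec(&op->waiters);
    }

    /* completion path: only touch the waitqueue if someone waits */
    static void demo_put_active(struct demo_op *op)
    {
            if (atomic_dec_and_test(&op->active) &&
                atomic_read(&op->waiters))
                    wake_up(&demo_cancellation_queue);
    }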

/linux/drivers/tee/optee/

call.c
    34  list_add_tail(&w->list_node, &cq->waiters);  in optee_cq_wait_init()
    49  list_add_tail(&w->list_node, &cq->waiters);  in optee_cq_wait_for_completion()
    58  list_for_each_entry(w, &cq->waiters, list_node) {  in optee_cq_complete_one()
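Here the waiters list is a FIFO of completion-based entries: callers append themselves, and whoever frees a call slot completes the oldest entry not yet completed. An illustrative sketch with hypothetical names and simplified locking::

    #include <linux/completion.h>
    #include <linux/list.h>
    #include <linux/mutex.h>

    struct demo_waiter {
            struct list_head list_node;
            struct completion c;
    };

    struct demo_call_queue {
            struct mutex mutex;
            struct list_head waiters;
    };

    static void demo_cq_wait_enqueue(struct demo_call_queue *cq,
                                     struct demo_waiter *w)
    {
            mutex_lock(&cq->mutex);
            init_completion(&w->c);
            list_add_tail(&w->list_node, &cq->waiters);     /* strict FIFO */
            mutex_unlock(&cq->mutex);
    }

    /* free a slot: hand it to the oldest waiter not yet completed */
    static void demo_cq_complete_one(struct demo_call_queue *cq)
    {
            struct demo_waiter *w;

            mutex_lock(&cq->mutex);
            list_for_each_entry(w, &cq->waiters, list_node) {
                    if (!completion_done(&w->c)) {
                            complete(&w->c);
                            break;
                    }
            }
            mutex_unlock(&cq->mutex);
    }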

optee_private.h
    44  struct list_head waiters;  member

/linux/drivers/gpu/drm/amd/amdkfd/

kfd_events.c
    652  static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)  in free_waiters() argument
    657  if (waiters[i].event)  in free_waiters()
    658  remove_wait_queue(&waiters[i].event->wq,  in free_waiters()
    659  &waiters[i].wait);  in free_waiters()
    661  kfree(waiters);  in free_waiters()

/linux/Documentation/scheduler/

completion.rst
    247  achieved calls complete() to signal exactly one of the waiters that it can
    252  ... or calls complete_all() to signal all current and future waiters::
    262  of waiters to continue - each call to complete() will simply increment the
    287  completions that were not yet consumed by waiters (implying that there are
    288  waiters) and true otherwise::
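A compact sketch of the API this excerpt describes (standard completion calls; the demo_* wrapper names are ours)::

    #include <linux/completion.h>

    static DECLARE_COMPLETION(demo_done);

    /* producer side: announce that the milestone was reached */
    static void demo_finish(void)
    {
            complete(&demo_done);   /* releases exactly one waiter */
            /* complete_all(&demo_done) would release all current and
             * future waiters, until reinit_completion() is called */
    }

    /* consumer side: block until demo_finish() has run */
    static void demo_wait(void)
    {
            wait_for_completion(&demo_done);
    }

    /* per the excerpt, completion_done() returns false while posted
     * completions remain unconsumed by waiters, true otherwise */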

/linux/tools/testing/selftests/filesystems/epoll/

epoll_wakeup_test.c
    3162  int waiters;  member
    3192  __atomic_fetch_add(&ctx->waiters, 1, __ATOMIC_ACQUIRE);  in epoll60_wait_thread()
    3206  __atomic_fetch_sub(&ctx->waiters, 1, __ATOMIC_RELEASE);  in epoll60_wait_thread()
    3232  pthread_t waiters[ARRAY_SIZE(ctx.evfd)];  in TEST() local
    3252  for (i = 0; i < ARRAY_SIZE(waiters); i++)  in TEST()
    3253  ASSERT_EQ(pthread_create(&waiters[i], NULL,  in TEST()
    3290  for (i = 0; i < ARRAY_SIZE(waiters); i++)  in TEST()
    3291  ret = pthread_kill(waiters[i], SIGUSR1);  in TEST()
    3292  for (i = 0; i < ARRAY_SIZE(waiters); i++)  in TEST()
    3293  pthread_join(waiters[i], NULL);  in TEST()
    [all …]

/linux/include/linux/greybus/

operation.h
    108  atomic_t waiters;  member

/linux/fs/xfs/

xfs_log.c
    153  INIT_LIST_HEAD(&head->waiters);  in xlog_grant_head_init()
    164  list_for_each_entry(tic, &head->waiters, t_queue)  in xlog_grant_head_wake_all()
    196  list_for_each_entry(tic, &head->waiters, t_queue) {  in xlog_grant_head_wake()
    243  list_add_tail(&tic->t_queue, &head->waiters);  in xlog_grant_head_wait()
    308  if (!list_empty_careful(&head->waiters)) {  in xlog_grant_head_check()
    1130  if (!list_empty_careful(&log->l_write_head.waiters)) {  in xfs_log_space_wake()
    1139  if (!list_empty_careful(&log->l_reserve_head.waiters)) {  in xfs_log_space_wake()
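Note the fast path in xfs_log_space_wake(): list_empty_careful() peeks at the waiter list without taking the lock, so the wake work is skipped entirely when nobody is queued; in this style of code the race is tolerable because waiters re-check for space under the lock after queueing. A sketch of that shape (hypothetical names, not the xfs code)::

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_grant_head {
            spinlock_t lock;
            struct list_head waiters;
    };

    static void demo_space_wake(struct demo_grant_head *head)
    {
            /* lockless peek: a false "empty" only delays a waiter that
             * will re-check the condition itself under head->lock */
            if (list_empty_careful(&head->waiters))
                    return;

            spin_lock(&head->lock);
            /* ... walk head->waiters and wake entries that now fit ... */
            spin_unlock(&head->lock);
    }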