/linux/net/x25/

x25_in.c
  210  int queued = 0;  in x25_state3_machine() local
  277  queued = 1;  in x25_state3_machine()
  315  queued = !sock_queue_rcv_skb(sk, skb);  in x25_state3_machine()
  319  queued = 1;  in x25_state3_machine()
  330  return queued;  in x25_state3_machine()
  418  int queued = 0, frametype, ns, nr, q, d, m;  in x25_process_rx_frame() local
  427  queued = x25_state1_machine(sk, skb, frametype);  in x25_process_rx_frame()
  430  queued = x25_state2_machine(sk, skb, frametype);  in x25_process_rx_frame()
  445  return queued;  in x25_process_rx_frame()
  450  int queued = x25_process_rx_frame(sk, skb);  in x25_backlog_rcv() local
  [all …]

x25_dev.c
  51  int queued = 1;  in x25_receive_data() local
  56  queued = x25_process_rx_frame(sk, skb);  in x25_receive_data()
  58  queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));  in x25_receive_data()
  62  return queued;  in x25_receive_data()
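The x25 listing shows the receive path's two-level hand-off: x25_receive_data() runs the state machines directly when the socket is uncontended, and otherwise backlogs the frame; every helper reports through the same queued flag, which means "the buffer was consumed". Rose, NetRom, and AX.25 below follow the same convention. A minimal sketch of that split, with simplified stand-in types (none of these names are the kernel's):

    #include <stdbool.h>

    struct frame;

    struct sock_model {
        bool owned_by_user;             /* stand-in for sock_owned_by_user() */
    };

    static int process_rx_frame(struct sock_model *sk, struct frame *f)
    {
        (void)sk; (void)f;
        return 1;                       /* pretend the state machine kept it */
    }

    static int add_backlog(struct sock_model *sk, struct frame *f)
    {
        (void)sk; (void)f;
        return 0;                       /* 0 on success, like sk_add_backlog() */
    }

    /* Returns nonzero when the frame was consumed (processed or backlogged). */
    static int receive_data(struct sock_model *sk, struct frame *f)
    {
        int queued;

        if (!sk->owned_by_user)
            queued = process_rx_frame(sk, f);   /* fast path: run state machine */
        else
            queued = !add_backlog(sk, f);       /* deferred: retried at release */

        return queued;
    }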
/linux/net/rose/

rose_in.c
  104  int queued = 0;  in rose_state3_machine() local
  167  queued = 1;  in rose_state3_machine()
  204  return queued;  in rose_state3_machine()
  265  int queued = 0, frametype, ns, nr, q, d, m;  in rose_process_rx_frame() local
  274  queued = rose_state1_machine(sk, skb, frametype);  in rose_process_rx_frame()
  277  queued = rose_state2_machine(sk, skb, frametype);  in rose_process_rx_frame()
  280  queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);  in rose_process_rx_frame()
  283  queued = rose_state4_machine(sk, skb, frametype);  in rose_process_rx_frame()
  286  queued = rose_state5_machine(sk, skb, frametype);  in rose_process_rx_frame()
  292  return queued;  in rose_process_rx_frame()
/linux/net/dccp/

input.c
  45  int queued = 0;  in dccp_rcv_close() local
  76  queued = 1;  in dccp_rcv_close()
  86  return queued;  in dccp_rcv_close()
  91  int queued = 0;  in dccp_rcv_closereq() local
  101  return queued;  in dccp_rcv_closereq()
  113  queued = 1;  in dccp_rcv_closereq()
  120  return queued;  in dccp_rcv_closereq()
  524  int queued = 0;  in dccp_rcv_respond_partopen_state_process() local
  568  return queued;  in dccp_rcv_respond_partopen_state_process()
  578  int queued = 0;  in dccp_rcv_state_process() local
  [all …]
/linux/net/ax25/

ax25_std_in.c
  143  int queued = 0;  in ax25_std_state3_machine() local
  225  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state3_machine()
  258  return queued;  in ax25_std_state3_machine()
  268  int queued = 0;  in ax25_std_state4_machine() local
  380  queued = ax25_rx_iframe(ax25, skb);  in ax25_std_state4_machine()
  413  return queued;  in ax25_std_state4_machine()
  421  int queued = 0, frametype, ns, nr, pf;  in ax25_std_frame_in() local
  427  queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
  430  queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type);  in ax25_std_frame_in()
  433  queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_std_frame_in()
  [all …]

ax25_ds_in.c
  147  int queued = 0;  in ax25_ds_state3_machine() local
  240  queued = ax25_rx_iframe(ax25, skb);  in ax25_ds_state3_machine()
  273  return queued;  in ax25_ds_state3_machine()
  281  int queued = 0, frametype, ns, nr, pf;  in ax25_ds_frame_in() local
  287  queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
  290  queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type);  in ax25_ds_frame_in()
  293  queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type);  in ax25_ds_frame_in()
  297  return queued;  in ax25_ds_frame_in()

ax25_in.c
  103  int queued = 0;  in ax25_rx_iframe() local
  145  queued = 1;  in ax25_rx_iframe()
  151  return queued;  in ax25_rx_iframe()
  159  int queued = 0;  in ax25_process_rx_frame() local
  167  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
  173  queued = ax25_ds_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
  175  queued = ax25_std_frame_in(ax25, skb, type);  in ax25_process_rx_frame()
  180  return queued;  in ax25_process_rx_frame()
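Across all of these protocol state machines, the contract behind queued is an ownership rule: a nonzero return means the state machine kept the skb (for example ax25_rx_iframe() queueing user data), so the caller must not free it. A compressed model of the caller's side of that rule (illustrative names only):

    #include <stdlib.h>

    struct buf { int kind; };

    static int state_machine(struct buf *b)
    {
        if (b->kind == 'I') {
            /* Data frame: hand off to the receive queue (elided here)
             * and report that the buffer was kept. */
            return 1;
        }
        return 0;       /* control frame: nothing was kept */
    }

    static void frame_in(struct buf *b)
    {
        int queued = state_machine(b);

        if (!queued)
            free(b);    /* not queued, so ownership stayed with the caller */
    }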
/linux/net/netrom/

nr_in.c
  153  int queued = 0;  in nr_state3_machine() local
  225  queued = 1;  in nr_state3_machine()
  272  return queued;  in nr_state3_machine()
  279  int queued = 0, frametype;  in nr_process_rx_frame() local
  288  queued = nr_state1_machine(sk, skb, frametype);  in nr_process_rx_frame()
  291  queued = nr_state2_machine(sk, skb, frametype);  in nr_process_rx_frame()
  294  queued = nr_state3_machine(sk, skb, frametype);  in nr_process_rx_frame()
  300  return queued;  in nr_process_rx_frame()
/linux/drivers/gpu/drm/

drm_flip_work.c
  63  list_add_tail(&task->node, &work->queued);  in drm_flip_work_queue_task()
  107  list_splice_tail(&work->queued, &work->commited);  in drm_flip_work_commit()
  108  INIT_LIST_HEAD(&work->queued);  in drm_flip_work_commit()
  151  INIT_LIST_HEAD(&work->queued);  in drm_flip_work_init()
  168  WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));  in drm_flip_work_cleanup()
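Here queued is a list rather than a flag: drm_flip_work_queue_task() appends tasks to work->queued, and drm_flip_work_commit() splices the whole batch onto work->commited (the field really is spelled that way in the kernel) for the worker to drain. A compressed sketch of that hand-off, assuming the kernel's list and spinlock helpers:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct flip_work_model {
        spinlock_t lock;
        struct list_head queued;    /* producer side */
        struct list_head commited;  /* consumer side */
    };

    static void commit_model(struct flip_work_model *w)
    {
        unsigned long flags;

        spin_lock_irqsave(&w->lock, flags);
        /* Move everything queued so far in one O(1) splice. */
        list_splice_tail(&w->queued, &w->commited);
        INIT_LIST_HEAD(&w->queued);
        spin_unlock_irqrestore(&w->lock, flags);
    }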
/linux/drivers/net/wireless/mediatek/mt76/

debugfs.c
  67  i, q->queued, q->head, q->tail);  in mt76_queues_read()
  77  int i, queued;  in mt76_rx_queues_read() local
  83  queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;  in mt76_rx_queues_read()
  85  i, q->queued, q->head, q->tail);  in mt76_rx_queues_read()

sdio.c
  315  q->queued = 0;  in mt76s_alloc_rx_queue()
  372  if (q->queued > 0) {  in mt76s_get_next_rx_entry()
  375  q->queued--;  in mt76s_get_next_rx_entry()
  439  while (q->queued > 0) {  in mt76s_process_tx_queue()
  455  if (!q->queued)  in mt76s_process_tx_queue()
  526  if (q->queued == q->ndesc)  in mt76s_tx_queue_skb()
  541  q->queued++;  in mt76s_tx_queue_skb()
  552  if (q->queued == q->ndesc)  in mt76s_tx_queue_skb_raw()
  565  q->queued++;  in mt76s_tx_queue_skb_raw()
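In mt76, q->queued is a descriptor-ring occupancy counter checked against the capacity q->ndesc: mt76s_tx_queue_skb() refuses to enqueue once the ring is full, and each completed entry decrements the counter (the debugfs reader additionally converts USB rx queues, which count in the opposite sense, into the same view). A generic, self-contained model of that accounting, using no real mt76 structures:

    #include <stdbool.h>

    struct ring {
        int queued;     /* descriptors currently in flight */
        int ndesc;      /* ring capacity */
        int head, tail;
    };

    static bool ring_enqueue(struct ring *q)
    {
        if (q->queued == q->ndesc)
            return false;               /* ring full: caller must back off */
        q->head = (q->head + 1) % q->ndesc;
        q->queued++;
        return true;
    }

    static bool ring_dequeue(struct ring *q)
    {
        if (q->queued == 0)
            return false;               /* nothing pending */
        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
        return true;
    }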
/linux/Documentation/userspace-api/media/mediactl/

media-request-ioc-queue.rst
  34  If the request was successfully queued, then the file descriptor can be
  37  If the request was already queued before, then ``EBUSY`` is returned.
  42  Once a request is queued, then the driver is required to gracefully handle
  49  queued directly and you next try to queue a request, or vice versa.
  62  The request was already queued or the application queued the first

media-request-ioc-reinit.rst
  40  A request can only be re-initialized if it either has not been queued
  41  yet, or if it was queued and completed. Otherwise it will set ``errno``
  51  The request is queued but not yet completed.

request-api.rst
  24  buffer queues since in practice only one buffer would be queued at a time.
  59  instead of being immediately applied, and buffers queued to a request do not
  60  enter the regular buffer queue until the request itself is queued.
  66  queued by calling :ref:`MEDIA_REQUEST_IOC_QUEUE` on the request file descriptor.
  68  A queued request cannot be modified anymore.
  86  a buffer was queued via a request or vice versa will result in an ``EBUSY``
  109  request that has been queued but not yet completed will return ``EBUSY``
  121  longer in use by the kernel. That is, if the request is queued and then the
  165  Once the request is fully prepared, it can be queued to the driver:
  245  Once the request is fully prepared, it can be queued to the driver:
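These documents describe the request life cycle: allocate from the media device, fill in controls and buffers, queue with MEDIA_REQUEST_IOC_QUEUE, wait for completion, then optionally re-initialize and reuse. A sketch of that sequence from userspace, using the documented ioctls; error handling is abbreviated and the device path is an assumption:

    #include <fcntl.h>
    #include <poll.h>
    #include <sys/ioctl.h>
    #include <linux/media.h>

    int run_one_request(void)
    {
        int media_fd = open("/dev/media0", O_RDWR);     /* assumed node */
        int req_fd;

        if (media_fd < 0)
            return -1;
        if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd) < 0)
            return -1;

        /* ... set controls and queue buffers against req_fd here ... */

        if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
            return -1;      /* EBUSY here means it was already queued */

        /* A completed request signals POLLPRI on its file descriptor. */
        struct pollfd pfd = { .fd = req_fd, .events = POLLPRI };
        poll(&pfd, 1, -1);

        /* Queued and completed, so it may now be re-initialized. */
        return ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);
    }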
/linux/security/integrity/ima/

ima_asymmetric_keys.c
  33  bool queued = false;  in ima_post_key_create_or_update() local
  43  queued = ima_queue_key(keyring, payload, payload_len);  in ima_post_key_create_or_update()
  45  if (queued)  in ima_post_key_create_or_update()

ima_queue_keys.c
  107  bool queued = false;  in ima_queue_key() local
  117  queued = true;  in ima_queue_key()
  121  if (!queued)  in ima_queue_key()
  124  return queued;  in ima_queue_key()
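Both IMA files implement deferred key measurement: ima_queue_key() returns true only if it stashed the key for later processing, and ima_post_key_create_or_update() measures immediately when queuing was declined. A simplified model of that decision (illustrative names; the real functions also carry the keyring and payload):

    #include <stdbool.h>

    static bool policies_loaded;    /* stand-in for the IMA policy state */

    static bool queue_key_model(void)
    {
        if (policies_loaded)
            return false;           /* too late to queue: process now */
        /* ... append a copy of the key to the deferred list ... */
        return true;
    }

    static void post_key_create_model(void)
    {
        bool queued = queue_key_model();

        if (!queued) {
            /* ... measure the key immediately ... */
        }
    }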
/linux/drivers/md/

dm-cache-background-tracker.c
  26  struct list_head queued;  member
  47  INIT_LIST_HEAD(&b->queued);  in btracker_create()
  205  list_add(&w->list, &b->queued);  in btracker_queue()
  219  if (list_empty(&b->queued))  in btracker_issue()
  222  w = list_first_entry(&b->queued, struct bt_work, list);  in btracker_issue()
/linux/virt/kvm/

async_pf.c
  134  vcpu->async_pf.queued = 0;  in kvm_clear_async_pf_completion_queue()
  154  vcpu->async_pf.queued--;  in kvm_check_async_pf_completion()
  168  if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)  in kvm_setup_async_pf()
  195  vcpu->async_pf.queued++;  in kvm_setup_async_pf()
  226  vcpu->async_pf.queued++;  in kvm_async_pf_wakeup_all()
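vcpu->async_pf.queued caps the number of outstanding async page faults per vCPU: kvm_setup_async_pf() bails out once the counter reaches ASYNC_PF_PER_VCPU, and each completion decrements it. A generic model of that bounded-outstanding-work pattern (the constant and names here are stand-ins):

    #include <stdbool.h>

    #define MAX_PER_VCPU 64         /* stand-in for ASYNC_PF_PER_VCPU */

    struct vcpu_model {
        int queued;                 /* async faults currently outstanding */
    };

    static bool setup_async_work(struct vcpu_model *v)
    {
        if (v->queued >= MAX_PER_VCPU)
            return false;           /* over the cap: fall back to synchronous */
        /* ... allocate and schedule the work item ... */
        v->queued++;
        return true;
    }

    static void complete_async_work(struct vcpu_model *v)
    {
        /* ... deliver the "page ready" event to the guest ... */
        v->queued--;
    }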
/linux/fs/xfs/

xfs_mru_cache.c
  102  unsigned int queued;  /* work has been queued */  member
  204  if (!mru->queued) {  in _xfs_mru_cache_list_insert()
  205  mru->queued = 1;  in _xfs_mru_cache_list_insert()
  280  mru->queued = next;  in _xfs_mru_cache_reap()
  281  if ((mru->queued > 0)) {  in _xfs_mru_cache_reap()
  388  if (mru->queued) {  in xfs_mru_cache_flush()
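xfs_mru_cache uses queued as a one-shot guard: the first insert arms the delayed reap work, later inserts see queued set and skip rescheduling, and the reaper itself decides whether to re-arm. A compressed sketch of that idiom, assuming the kernel's workqueue helpers (the surrounding locking is elided):

    #include <linux/workqueue.h>
    #include <linux/spinlock.h>

    struct mru_model {
        spinlock_t lock;            /* held by callers of insert_model() */
        unsigned int queued;        /* reap work already scheduled? */
        struct delayed_work work;
        unsigned long grp_time;     /* delay between reap passes */
    };

    static void insert_model(struct mru_model *mru)
    {
        /* ... link the new element into the current list group ... */

        if (!mru->queued) {
            mru->queued = 1;        /* only the first insert arms the timer */
            queue_delayed_work(system_wq, &mru->work, mru->grp_time);
        }
    }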
/linux/drivers/media/platform/vsp1/

vsp1_dl.c
  224  struct vsp1_dl_list *queued;  member
  841  if (!dlm->queued)  in vsp1_dl_list_hw_update_pending()
  899  __vsp1_dl_list_put(dlm->queued);  in vsp1_dl_list_commit_continuous()
  900  dlm->queued = dl;  in vsp1_dl_list_commit_continuous()
  1020  if (dlm->queued) {  in vsp1_dlm_irq_frame_end()
  1023  dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;  in vsp1_dlm_irq_frame_end()
  1026  dlm->active = dlm->queued;  in vsp1_dlm_irq_frame_end()
  1027  dlm->queued = NULL;  in vsp1_dlm_irq_frame_end()
  1038  dlm->queued = dlm->pending;  in vsp1_dlm_irq_frame_end()
  1074  __vsp1_dl_list_put(dlm->queued);  in vsp1_dlm_reset()
  [all …]
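vsp1 rotates display lists through three slots: active is what the hardware is showing, queued has been committed for the next frame end, and pending waits behind it; vsp1_dlm_irq_frame_end() performs the promotion. A sketch of that rotation (simplified; the real code also manages reference counts and internal-interrupt flags):

    struct dl_model;

    struct dlm_model {
        struct dl_model *active;    /* currently being displayed */
        struct dl_model *queued;    /* committed to hardware, not yet active */
        struct dl_model *pending;   /* waiting for a commit slot */
    };

    static void frame_end_model(struct dlm_model *dlm)
    {
        if (dlm->queued) {
            /* The hardware picked up the queued list at this frame end. */
            dlm->active = dlm->queued;
            dlm->queued = NULL;
        }
        if (dlm->pending && !dlm->queued) {
            /* Promote the pending list into the now-free queued slot. */
            dlm->queued = dlm->pending;
            dlm->pending = NULL;
        }
    }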
/linux/sound/firewire/fireworks/

fireworks_hwdep.c
  127  bool queued;  in hwdep_read() local
  132  queued = efw->push_ptr != efw->pull_ptr;  in hwdep_read()
  134  while (!dev_lock_changed && !queued) {  in hwdep_read()
  143  queued = efw->push_ptr != efw->pull_ptr;  in hwdep_read()
  150  else if (queued)  in hwdep_read()
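In hwdep_read(), queued is just the ring condition push_ptr != pull_ptr, re-evaluated after every wakeup while the caller sleeps waiting for data. A userspace analogue of that wait loop using pthreads (illustrative only, not the driver's actual wait queue):

    #include <pthread.h>

    struct fifo_model {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        unsigned int push_ptr, pull_ptr;
    };

    static void wait_for_data(struct fifo_model *f)
    {
        pthread_mutex_lock(&f->lock);
        /* Re-test the condition on every wakeup, as the driver does. */
        while (f->push_ptr == f->pull_ptr)      /* i.e. !queued */
            pthread_cond_wait(&f->cond, &f->lock);
        pthread_mutex_unlock(&f->lock);
    }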
/linux/Documentation/userspace-api/media/v4l/

vidioc-streamon.rst
  52  If ``VIDIOC_STREAMON`` fails then any already queued buffers will remain
  53  queued.
  64  If buffers have been queued with :ref:`VIDIOC_QBUF` and
  66  ``VIDIOC_STREAMON``, then those queued buffers will also be removed from
  78  but ``VIDIOC_STREAMOFF`` will return queued buffers to their starting
/linux/Documentation/usb/

ohci.rst
  22  - interrupt transfers can be larger, and can be queued
  28  types can be queued. That was also true in "usb-ohci", except for interrupt
  30  to overhead in IRQ processing. When interrupt transfers are queued, those
/linux/Documentation/features/locking/queued-rwlocks/

arch-support.txt
  2  # Feature name: queued-rwlocks
  4  # description: arch supports queued rwlocks

/linux/Documentation/features/locking/queued-spinlocks/

arch-support.txt
  2  # Feature name: queued-spinlocks
  4  # description: arch supports queued spinlocks