Searched refs:work (Results 1 – 25 of 2495) sorted by relevance

/linux/virt/kvm/
async_pf.c 48 container_of(work, struct kvm_async_pf, work); in async_pf_execute()
109 if (!work->vcpu) in kvm_clear_async_pf_completion_queue()
114 flush_work(&work->work); in kvm_clear_async_pf_completion_queue()
116 if (cancel_work_sync(&work->work)) { in kvm_clear_async_pf_completion_queue()
117 mmput(work->mm); in kvm_clear_async_pf_completion_queue()
144 work = list_first_entry(&vcpu->async_pf.done, typeof(*work), in kvm_check_async_pf_completion()
180 if (!work) in kvm_setup_async_pf()
189 mmget(work->mm); in kvm_setup_async_pf()
192 INIT_WORK(&work->work, async_pf_execute); in kvm_setup_async_pf()
198 schedule_work(&work->work); in kvm_setup_async_pf()
[all …]
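
The kvm/async_pf.c hits above trace the canonical workqueue lifecycle: a work_struct embedded in a request, INIT_WORK() plus schedule_work() to submit, container_of() in the handler to recover the request, and flush_work()/cancel_work_sync() on teardown. A minimal sketch of that pattern follows; the my_* names are illustrative, not from async_pf.c.

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_async_req {
	struct work_struct work;
	int payload;
};

static void my_async_execute(struct work_struct *work)
{
	/* recover the wrapper, as async_pf_execute() does at line 48 */
	struct my_async_req *req =
		container_of(work, struct my_async_req, work);

	pr_info("handled payload %d\n", req->payload);
	kfree(req);
}

static int my_async_submit(int payload)
{
	struct my_async_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;
	req->payload = payload;
	INIT_WORK(&req->work, my_async_execute);
	schedule_work(&req->work);	/* system_wq, as in kvm_setup_async_pf() */
	return 0;
}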
/linux/drivers/gpu/drm/
drm_flip_work.c 86 work->func(work, val); in drm_flip_work_queue()
107 list_splice_tail(&work->queued, &work->commited); in drm_flip_work_commit()
108 INIT_LIST_HEAD(&work->queued); in drm_flip_work_commit()
110 queue_work(wq, &work->worker); in drm_flip_work_commit()
133 work->func(work, task->data); in flip_worker()
150 work->name = name; in drm_flip_work_init()
151 INIT_LIST_HEAD(&work->queued); in drm_flip_work_init()
152 INIT_LIST_HEAD(&work->commited); in drm_flip_work_init()
153 spin_lock_init(&work->lock); in drm_flip_work_init()
154 work->func = func; in drm_flip_work_init()
[all …]
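
drm_flip_work batches deferred cleanup behind a page flip: drm_flip_work_queue() stages a value on the ->queued list, and drm_flip_work_commit() splices the batch onto ->commited and kicks a workqueue, which feeds each value to the callback. A sketch under those assumptions; only the drm_flip_work_* calls are real API, the rest is illustrative.

#include <drm/drm_flip_work.h>
#include <linux/workqueue.h>

static struct drm_flip_work unref_work;

static void unref_cb(struct drm_flip_work *work, void *val)
{
	/* e.g. drop the framebuffer reference the flip kept alive */
}

static void example_init(void)
{
	drm_flip_work_init(&unref_work, "fb unref", unref_cb);
}

static void example_flip_done(void *old_fb, struct workqueue_struct *wq)
{
	drm_flip_work_queue(&unref_work, old_fb);  /* stage on ->queued */
	drm_flip_work_commit(&unref_work, wq);     /* splice to ->commited, kick worker */
}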
drm_vblank_work.c 58 list_del_init(&work->node); in drm_handle_vblank_works()
77 list_del_init(&work->node); in drm_vblank_cancel_pending_works()
117 if (work->cancelling) in drm_vblank_work_schedule()
126 if (list_empty(&work->node)) { in drm_vblank_work_schedule()
137 work->count = count; in drm_vblank_work_schedule()
150 list_del_init(&work->node); in drm_vblank_work_schedule()
189 if (!list_empty(&work->node)) { in drm_vblank_work_cancel_sync()
190 list_del_init(&work->node); in drm_vblank_work_cancel_sync()
195 work->cancelling++; in drm_vblank_work_cancel_sync()
204 work->cancelling--; in drm_vblank_work_cancel_sync()
[all …]
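
drm_vblank_work defers a kthread_work until a CRTC's vblank counter reaches a target, with cancellation tracked via the ->cancelling counter seen above. A hedged sketch, assuming the drm_vblank_work_init/drm_vblank_work_schedule/to_drm_vblank_work helpers from drm_vblank_work.h; the other names are illustrative.

#include <drm/drm_vblank_work.h>

static struct drm_vblank_work vb_work;

static void vb_work_fn(struct kthread_work *base)
{
	struct drm_vblank_work *work = to_drm_vblank_work(base);

	/* runs on the CRTC's vblank worker after the target count */
}

static void example_arm(struct drm_crtc *crtc, u64 target)
{
	drm_vblank_work_init(&vb_work, crtc, vb_work_fn);
	/* nextonmiss=false: run immediately if the target count was
	 * already missed, rather than deferring to the next vblank */
	drm_vblank_work_schedule(&vb_work, target, false);
}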
/linux/fs/ksmbd/
server.c 96 if (ksmbd_conn_exiting(work) || ksmbd_conn_need_reconnect(work)) { in check_conn_state()
136 if (work->sess && conn->ops->is_sign_req(work, command)) { in __process_request()
144 ret = cmds->proc(work); in __process_request()
224 if (work->sess && in __handle_ksmbd_work()
225 (work->sess->sign || smb3_11_final_sess_setup_resp(work) || in __handle_ksmbd_work()
235 if (work->sess && work->sess->enc && work->encrypted && in __handle_ksmbd_work()
244 ksmbd_conn_write(work); in __handle_ksmbd_work()
255 struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work); in handle_ksmbd_work() local
279 if (!work) { in queue_ksmbd_work()
284 work->conn = conn; in queue_ksmbd_work()
[all …]
ksmbd_work.c 23 if (work) { in ksmbd_alloc_work_struct()
24 work->compound_fid = KSMBD_NO_FID; in ksmbd_alloc_work_struct()
28 INIT_LIST_HEAD(&work->fp_entry); in ksmbd_alloc_work_struct()
31 return work; in ksmbd_alloc_work_struct()
38 kvfree(work->response_buf); in ksmbd_free_work_struct()
39 kvfree(work->aux_payload_buf); in ksmbd_free_work_struct()
40 kfree(work->tr_buf); in ksmbd_free_work_struct()
41 kvfree(work->request_buf); in ksmbd_free_work_struct()
42 if (work->async_id) in ksmbd_free_work_struct()
43 ksmbd_release_id(&work->conn->async_ida, work->async_id); in ksmbd_free_work_struct()
[all …]
smb2pdu.c 99 work->tcon = NULL; in smb2_get_ksmbd_tcon()
113 work->tcon = ksmbd_tree_conn_lookup(work->sess, tree_id); in smb2_get_ksmbd_tcon()
114 if (!work->tcon) { in smb2_get_ksmbd_tcon()
583 if (work->sess) in smb2_check_user_session()
3941 query_dir_private.work = work; in smb2_query_dir()
4780 rc = get_file_all_info(work, rsp, fp, work->response_buf); in smb2_get_info_file()
4785 get_file_alternate_info(work, rsp, fp, work->response_buf); in smb2_get_info_file()
4790 get_file_stream_info(work, rsp, fp, work->response_buf); in smb2_get_info_file()
4810 rc = smb2_get_ea(work, fp, req, rsp, work->response_buf); in smb2_get_info_file()
5908 return smb2_create_link(work, work->tcon->share_conf, in smb2_set_info_file()
[all …]
connection.c 108 work->syncronous = true; in ksmbd_conn_enqueue_request()
128 if (!work->multiRsp) in ksmbd_conn_try_dequeue_request()
131 if (!work->multiRsp) { in ksmbd_conn_try_dequeue_request()
133 if (work->syncronous == false) in ksmbd_conn_try_dequeue_request()
167 if (!work->response_buf) { in ksmbd_conn_write()
172 if (work->tr_buf) { in ksmbd_conn_write()
178 if (work->aux_payload_sz) { in ksmbd_conn_write()
179 iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz }; in ksmbd_conn_write()
181 iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz }; in ksmbd_conn_write()
184 if (work->tr_buf) in ksmbd_conn_write()
[all …]
connection.h 154 int ksmbd_conn_write(struct ksmbd_work *work);
161 void ksmbd_conn_enqueue_request(struct ksmbd_work *work);
174 static inline bool ksmbd_conn_good(struct ksmbd_work *work) in ksmbd_conn_good() argument
176 return work->conn->status == KSMBD_SESS_GOOD; in ksmbd_conn_good()
181 return work->conn->status == KSMBD_SESS_NEED_NEGOTIATE; in ksmbd_conn_need_negotiate()
186 return work->conn->status == KSMBD_SESS_NEED_RECONNECT; in ksmbd_conn_need_reconnect()
191 return work->conn->status == KSMBD_SESS_EXITING; in ksmbd_conn_exiting()
196 work->conn->status = KSMBD_SESS_GOOD; in ksmbd_conn_set_good()
201 work->conn->status = KSMBD_SESS_NEED_NEGOTIATE; in ksmbd_conn_set_need_negotiate()
206 work->conn->status = KSMBD_SESS_NEED_RECONNECT; in ksmbd_conn_set_need_reconnect()
[all …]
/linux/include/trace/events/
workqueue.h 31 __field( void *, work )
39 __entry->work = work;
63 TP_ARGS(work),
66 __field( void *, work )
70 __entry->work = work;
86 TP_ARGS(work),
89 __field( void *, work )
94 __entry->work = work;
112 TP_ARGS(work, function),
115 __field( void *, work )
[all …]
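
These hits are the TRACE_EVENT() definitions for the workqueue tracepoints, each recording the work_struct pointer with __field()/TP_fast_assign(). A cut-down sketch of that shape (not the full definitions from include/trace/events/workqueue.h, and it needs the usual trace-header boilerplate around it to compile):

TRACE_EVENT(my_workqueue_event,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field( void *, work )
	),

	TP_fast_assign(
		__entry->work = work;
	),

	TP_printk("work struct %p", __entry->work)
);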
/linux/kernel/
irq_work.c 109 if (!irq_work_claim(work)) in irq_work_queue()
114 __irq_work_queue_local(work); in irq_work_queue()
130 return irq_work_queue(work); in irq_work_queue_on()
137 if (!irq_work_claim(work)) in irq_work_queue_on()
140 kasan_record_aux_stack(work); in irq_work_queue_on()
159 if (!irq_work_claim(work)) in irq_work_queue_on()
193 struct irq_work *work = arg; in irq_work_single() local
211 work->func(work); in irq_work_single()
227 struct irq_work *work, *tmp; in irq_work_run_list() local
242 irq_work_single(work); in irq_work_run_list()
[all …]
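
irq_work queues a callback from any context, including NMI, and runs it from the IRQ-work interrupt; irq_work_claim() in the hits above is what makes a pending item queue at most once. A sketch with illustrative my_* names:

#include <linux/irq_work.h>

static void my_irq_cb(struct irq_work *work)
{
	/* hard-IRQ context; the claim bit is released before this runs */
}

static struct irq_work my_work = IRQ_WORK_INIT(my_irq_cb);

static void example(void)
{
	irq_work_queue(&my_work);	/* raise on the local CPU */
	/* or irq_work_queue_on(&my_work, cpu) to target another CPU */
}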
task_work.c 44 work->next = head; in task_work_add()
92 pprev = &work->next; in task_work_cancel_match()
93 else if (cmpxchg(pprev, work, work->next) == work) in task_work_cancel_match()
98 return work; in task_work_cancel_match()
144 if (!work) { in task_work_run()
150 } while (cmpxchg(&task->task_works, work, head) != work); in task_work_run()
152 if (!work) in task_work_run()
163 next = work->next; in task_work_run()
164 work->func(work); in task_work_run()
165 work = next; in task_work_run()
[all …]
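
task_work pushes a callback_head onto a task's ->task_works list with cmpxchg(), as the hits show, and runs the chain when that task next crosses into user mode. A sketch with illustrative my_* names:

#include <linux/task_work.h>
#include <linux/sched.h>

static void my_cb(struct callback_head *head)
{
	/* runs in the context of the target task */
}

static struct callback_head my_cbh;

static int example(struct task_struct *task)
{
	init_task_work(&my_cbh, my_cb);
	/* TWA_RESUME: run at the next return to user space;
	 * fails with -ESRCH once the task is past exit_task_work() */
	return task_work_add(task, &my_cbh, TWA_RESUME);
}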
kthread.c 748 work = NULL; in kthread_worker_fn()
758 if (work) { in kthread_worker_fn()
762 work->func(work); in kthread_worker_fn()
895 return !list_empty(&work->node) || work->canceling; in queuing_blocked()
904 WARN_ON_ONCE(work->worker && work->worker != worker); in kthread_insert_work_sanity_check()
961 struct kthread_work *work = &dwork->work; in kthread_delayed_work_timer_fn() local
991 struct kthread_work *work = &dwork->work; in __kthread_queue_delayed_work() local
1035 struct kthread_work *work = &dwork->work; in kthread_queue_delayed_work() local
1059 container_of(work, struct kthread_flush_work, work); in kthread_flush_work_fn()
1112 container_of(work, struct kthread_delayed_work, work); in kthread_cancel_delayed_work_timer()
[all …]
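
kthread_worker is the kthread-backed counterpart to workqueues: kthread_worker_fn() above pops kthread_work items and calls work->func on one dedicated thread. A sketch in which only the kthread_* calls are real API:

#include <linux/kthread.h>

static void my_kwork_fn(struct kthread_work *work)
{
	/* runs on the dedicated worker thread, never concurrently
	 * with itself */
}

static int example(void)
{
	struct kthread_worker *worker;
	struct kthread_work work;

	worker = kthread_create_worker(0, "my-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_init_work(&work, my_kwork_fn);
	kthread_queue_work(worker, &work);
	kthread_flush_work(&work);	/* wait for completion */
	kthread_destroy_worker(worker);
	return 0;
}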
workqueue.c 535 debug_object_free(&work->work, &work_debug_descr); in destroy_delayed_work_on_stack()
635 atomic_long_set(&work->data, data | flags | work_static(work)); in set_work_data()
1653 struct work_struct *work = &dwork->work; in __queue_delayed_work() local
1695 struct work_struct *work = &dwork->work; in queue_delayed_work_on() local
1772 struct work_struct *work = &rwork->work; in queue_rcu_work() local
2078 send_mayday(work); in pool_mayday_timeout()
2652 struct wq_barrier *barr = container_of(work, struct wq_barrier, work); in wq_barrier_func()
3152 cwait.work = work; in __cancel_work_timer()
3265 set_work_pool_and_clear_pending(work, get_work_pool_id(work)); in __cancel_work()
3359 fn(&ew->work); in execute_in_process_context()
[all …]
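
Several hits above are the delayed-work path in kernel/workqueue.c, where a timer arms before the work_struct reaches a pool. A sketch of the corresponding user-facing calls, with illustrative my_* names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void my_dwork_fn(struct work_struct *work)
{
	/* runs on system_wq once the timer armed in
	 * __queue_delayed_work() has fired */
}

static DECLARE_DELAYED_WORK(my_dwork, my_dwork_fn);

static void example(void)
{
	queue_delayed_work(system_wq, &my_dwork, HZ);	/* ~1 second out */
	mod_delayed_work(system_wq, &my_dwork, 2 * HZ);	/* re-arm to ~2 s */
	cancel_delayed_work_sync(&my_dwork);		/* or cancel and wait */
}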
/linux/include/linux/
completion.h 35 #define COMPLETION_INITIALIZER(work) \ argument
39 (*({ init_completion_map(&(work), &(map)); &(work); }))
41 #define COMPLETION_INITIALIZER_ONSTACK(work) \ argument
42 (*({ init_completion(&work); &work; }))
52 #define DECLARE_COMPLETION(work) \ argument
53 struct completion work = COMPLETION_INITIALIZER(work)
68 # define DECLARE_COMPLETION_ONSTACK(work) \ argument
69 struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
71 struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map)
73 # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) argument
[all …]
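
The completion macros above wrap init_completion() so a completion can be declared ready to wait on. A sketch of the usual pairing of DECLARE_COMPLETION() with complete()/wait_for_completion(), with illustrative my_* names:

#include <linux/completion.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(setup_done);	/* expands COMPLETION_INITIALIZER() */

static int my_setup_thread(void *data)
{
	/* ... bring things up ... */
	complete(&setup_done);
	return 0;
}

static void example(void)
{
	kthread_run(my_setup_thread, NULL, "my-setup");
	wait_for_completion(&setup_done);	/* sleep until complete() */
}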
workqueue.h 28 #define work_data_bits(work) ((unsigned long *)(&(work)->data)) argument
111 struct work_struct work; member
120 struct work_struct work; member
155 return container_of(work, struct delayed_work, work); in to_delayed_work()
160 return container_of(work, struct rcu_work, work); in to_rcu_work()
164 struct work_struct work; member
187 .work = __WORK_INITIALIZER((n).work, (f)), \
288 #define work_pending(work) \ argument
297 work_pending(&(w)->work)
439 struct work_struct *work);
[all …]
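
to_delayed_work() and to_rcu_work() above are container_of() wrappers for the two work_struct variants declared in this header. A sketch of the rcu_work flavor, which defers execution until after an RCU grace period (my_* names are illustrative):

#include <linux/workqueue.h>

static void my_rwork_fn(struct work_struct *work)
{
	struct rcu_work *rwork = to_rcu_work(work);

	/* the grace period has elapsed; stale readers are gone */
}

static struct rcu_work my_rwork;

static void example(void)
{
	INIT_RCU_WORK(&my_rwork, my_rwork_fn);
	queue_rcu_work(system_wq, &my_rwork);
}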
kthread.h 112 struct kthread_work work; member
122 #define KTHREAD_WORK_INIT(work, fn) { \ argument
123 .node = LIST_HEAD_INIT((work).node), \
128 .work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
136 #define DEFINE_KTHREAD_WORK(work, fn) \ argument
137 struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
165 #define kthread_init_work(work, fn) \ argument
168 INIT_LIST_HEAD(&(work)->node); \
169 (work)->func = (fn); \
174 kthread_init_work(&(dwork)->work, (fn)); \
[all …]
jump_label_ratelimit.h 12 struct delayed_work work; member
18 struct delayed_work work; member
24 struct delayed_work work; member
28 __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
30 __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
33 __static_key_deferred_flush((x), &(x)->work)
37 struct delayed_work *work,
39 extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
43 extern void jump_label_update_timeout(struct work_struct *work);
49 .work = __DELAYED_WORK_INITIALIZER((name).work, \
[all …]
/linux/fs/btrfs/
async-thread.c 63 return work->wq->fs_info; in btrfs_work_owner()
224 struct btrfs_work *work; in run_ordered_work() local
255 work->ordered_func(work); in run_ordered_work()
262 if (work == self) { in run_ordered_work()
290 work->ordered_free(work); in run_ordered_work()
319 if (work->ordered_func) in btrfs_work_helper()
321 wq = work->wq; in btrfs_work_helper()
325 work->func(work); in btrfs_work_helper()
345 work->func = func; in btrfs_init_work()
350 work->flags = 0; in btrfs_init_work()
[all …]
/linux/drivers/staging/octeon/
ethernet-rx.c 72 if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) in cvm_oct_check_rcv_error()
112 work->word1.len -= i + 5; in cvm_oct_check_rcv_error()
119 work->word1.len -= i + 4; in cvm_oct_check_rcv_error()
131 cvm_oct_free_work(work); in cvm_oct_check_rcv_error()
138 cvm_oct_free_work(work); in cvm_oct_check_rcv_error()
146 int len = work->word1.len; in copy_segments_to_skb()
222 struct cvmx_wqe *work; in cvm_oct_poll() local
230 prefetch(work); in cvm_oct_poll()
232 if (!work) { in cvm_oct_poll()
299 cvm_oct_free_work(work); in cvm_oct_poll()
[all …]
ethernet-tx.c 518 if (unlikely(!work)) { in cvm_oct_xmit_pow()
562 work->word1.len = skb->len; in cvm_oct_xmit_pow()
569 work->word2.u64 = 0; in cvm_oct_xmit_pow()
570 work->word2.s.bufs = 1; in cvm_oct_xmit_pow()
571 work->packet_ptr.u64 = 0; in cvm_oct_xmit_pow()
592 work->word2.s.is_v6 = 0; in cvm_oct_xmit_pow()
603 work->word2.s.IP_exc = 0; in cvm_oct_xmit_pow()
609 work->word2.s.not_IP = 0; in cvm_oct_xmit_pow()
643 memcpy(work->packet_data, skb->data, sizeof(work->packet_data)); in cvm_oct_xmit_pow()
647 cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type, in cvm_oct_xmit_pow()
[all …]
/linux/fs/
fs-writeback.c 148 kfree(work); in finish_writeback_work()
163 if (work->done) in wb_queue_work()
1002 work = kmalloc(sizeof(*work), GFP_ATOMIC); in bdi_split_work_to_wbs()
1003 if (work) { in bdi_split_work_to_wbs()
1097 work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN); in cgroup_writeback_by_id()
1098 if (work) { in cgroup_writeback_by_id()
1767 if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages) in writeback_chunk_size()
2027 if ((work->for_background || work->for_kupdate) && in wb_writeback()
2053 if (work->sb) in wb_writeback()
2054 progress = writeback_sb_inodes(work->sb, wb, work); in wb_writeback()
[all …]
io-wq.c 474 return work; in io_get_next_work()
485 return work; in io_get_next_work()
527 if (work) { in io_assign_current_work()
558 if (work) in io_worker_handle_work()
562 if (!work) in io_worker_handle_work()
582 work = linked; in io_worker_handle_work()
599 if (!work) in io_worker_handle_work()
603 } while (work); in io_worker_handle_work()
877 work = wq->free_work(work); in io_run_cancel()
878 } while (work); in io_run_cancel()
[all …]
/linux/drivers/accessibility/speakup/
selection.c 20 struct work_struct work; member
25 static void __speakup_set_selection(struct work_struct *work) in __speakup_set_selection() argument
28 container_of(work, struct speakup_selection_work, work); in __speakup_set_selection()
58 .work = __WORK_INITIALIZER(speakup_sel_work.work,
87 schedule_work_on(WORK_CPU_UNBOUND, &speakup_sel_work.work); in speakup_set_selection()
96 cancel_work_sync(&speakup_sel_work.work); in speakup_cancel_selection()
106 static void __speakup_paste_selection(struct work_struct *work) in __speakup_paste_selection() argument
109 container_of(work, struct speakup_selection_work, work); in __speakup_paste_selection()
117 .work = __WORK_INITIALIZER(speakup_paste_work.work,
129 schedule_work_on(WORK_CPU_UNBOUND, &speakup_paste_work.work); in speakup_paste_selection()
[all …]
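
The speakup hits show a work_struct statically initialized inside a wrapper struct with __WORK_INITIALIZER() and scheduled with schedule_work_on(WORK_CPU_UNBOUND, ...). A sketch of that pattern, with illustrative my_* names:

#include <linux/workqueue.h>

struct my_sel_work {
	struct work_struct work;
	int payload;
};

static void __my_set_selection(struct work_struct *work)
{
	struct my_sel_work *sel =
		container_of(work, struct my_sel_work, work);

	/* use sel->payload ... */
}

static struct my_sel_work my_sel = {
	.work = __WORK_INITIALIZER(my_sel.work, __my_set_selection),
};

static void example(void)
{
	schedule_work_on(WORK_CPU_UNBOUND, &my_sel.work);
	cancel_work_sync(&my_sel.work);	/* on teardown */
}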
/linux/drivers/infiniband/core/
cm.c 1999 cm_issue_rej(work->port, work->mad_recv_wc, in cm_match_req()
2016 cm_issue_rej(work->port, work->mad_recv_wc, in cm_match_req()
2129 memset(&work->path[0], 0, sizeof(work->path[0])); in cm_req_handler()
2147 work->path[1].rec_type = work->path[0].rec_type; in cm_req_handler()
2515 cm_issue_rej(work->port, work->mad_recv_wc, in cm_rep_handler()
2809 cm_issue_drep(work->port, work->mad_recv_wc); in cm_dreq_handler()
3306 work->port->port_num, &work->path[0], in cm_lap_handler()
3842 struct cm_work *work = container_of(_work, struct cm_work, work.work); in cm_work_handler() local
3906 work = kmalloc(sizeof *work, GFP_ATOMIC); in cm_establish()
3947 queue_delayed_work(cm.wq, &work->work, 0); in cm_establish()
[all …]
/linux/drivers/net/wireless/st/cw1200/
sta.h 58 void cw1200_event_handler(struct work_struct *work);
59 void cw1200_bss_loss_work(struct work_struct *work);
60 void cw1200_bss_params_work(struct work_struct *work);
61 void cw1200_keep_alive_work(struct work_struct *work);
62 void cw1200_tx_failure_work(struct work_struct *work);
78 void cw1200_join_timeout(struct work_struct *work);
79 void cw1200_unjoin_work(struct work_struct *work);
81 void cw1200_wep_key_work(struct work_struct *work);
90 void cw1200_ba_work(struct work_struct *work);
113 void cw1200_set_tim_work(struct work_struct *work);
[all …]
