/kernel/linux/linux-6.6/fs/smb/server/
ksmbd_work.c
      21  struct ksmbd_work *work = kmem_cache_zalloc(work_cache, GFP_KERNEL);  (local in ksmbd_alloc_work_struct)
      23  if (work) {  (in ksmbd_alloc_work_struct)
      24  work->compound_fid = KSMBD_NO_FID;  (in ksmbd_alloc_work_struct)
      25  work->compound_pfid = KSMBD_NO_FID;  (in ksmbd_alloc_work_struct)
      26  INIT_LIST_HEAD(&work->request_entry);  (in ksmbd_alloc_work_struct)
      27  INIT_LIST_HEAD(&work->async_request_entry);  (in ksmbd_alloc_work_struct)
      28  INIT_LIST_HEAD(&work->fp_entry);  (in ksmbd_alloc_work_struct)
      29  INIT_LIST_HEAD(&work->interim_entry);  (in ksmbd_alloc_work_struct)
      30  INIT_LIST_HEAD(&work->aux_read_list);  (in ksmbd_alloc_work_struct)
      31  work…  (in ksmbd_alloc_work_struct)
      42  ksmbd_free_work_struct(struct ksmbd_work *work)
      96  ksmbd_queue_work(struct ksmbd_work *work)
     101  __ksmbd_iov_pin(struct ksmbd_work *work, void *ib, unsigned int ib_len)
     109  __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len, void *aux_buf, unsigned int aux_size)
     159  ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len)
     164  ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len, void *aux_buf, unsigned int aux_size)
     170  allocate_interim_rsp_buf(struct ksmbd_work *work)
    [all...]
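The allocator indexed above zeroes each ksmbd_work from a dedicated slab cache, seeds the compound fid fields with a "no fid" sentinel, and initialises every list_head before the item can be linked anywhere. A minimal self-contained sketch of that pattern; the "demo" names are illustrative, not ksmbd symbols, and the cache is assumed created elsewhere with kmem_cache_create():

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>

#define DEMO_NO_FID ((u64)-1)   /* "no fid" sentinel, in the spirit of KSMBD_NO_FID */

struct demo_work {
    u64 compound_fid;
    struct list_head request_entry;
    struct list_head fp_entry;
};

/* Assumed created at module init with kmem_cache_create("demo_work", ...). */
static struct kmem_cache *demo_work_cache;

static struct demo_work *demo_alloc_work(void)
{
    struct demo_work *w = kmem_cache_zalloc(demo_work_cache, GFP_KERNEL);

    if (!w)
        return NULL;

    w->compound_fid = DEMO_NO_FID;
    INIT_LIST_HEAD(&w->request_entry);  /* list heads must be initialised */
    INIT_LIST_HEAD(&w->fp_entry);       /* before any list_add()/list_del() */
    return w;
}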
server.c
      88  * @work: smb work containing server thread information  (kernel-doc)
      92  static inline int check_conn_state(struct ksmbd_work *work)
      96  if (ksmbd_conn_exiting(work->conn) ||  (in check_conn_state)
      97  ksmbd_conn_need_reconnect(work->conn)) {  (in check_conn_state)
      98  rsp_hdr = work->response_buf;  (in check_conn_state)
     108  static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn,
     115  if (check_conn_state(work))  (in __process_request)
     118  if (ksmbd_verify_smb_message(work)) {  (in __process_request)
     119  conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETE…  (in __process_request)
     163  __handle_ksmbd_work(struct ksmbd_work *work, struct ksmbd_conn *conn)
     265  struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);  (local in handle_ksmbd_work)
     292  struct ksmbd_work *work;  (local in queue_ksmbd_work)
     388  server_ctrl_handle_work(struct work_struct *work)
    [all...]
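handle_ksmbd_work() at line 265 shows the canonical workqueue idiom: embed a struct work_struct in the request object and recover the container with container_of() in the handler. A generic sketch of the same idiom, with illustrative names:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/printk.h>

struct demo_req {
    int id;
    struct work_struct work;
};

static void demo_handle_work(struct work_struct *wk)
{
    /* Recover the containing request, as handle_ksmbd_work() does. */
    struct demo_req *req = container_of(wk, struct demo_req, work);

    pr_info("handling request %d\n", req->id);
    kfree(req);
}

static int demo_queue_req(int id)
{
    struct demo_req *req = kzalloc(sizeof(*req), GFP_KERNEL);

    if (!req)
        return -ENOMEM;

    req->id = id;
    INIT_WORK(&req->work, demo_handle_work);
    schedule_work(&req->work);  /* runs demo_handle_work() on system_wq */
    return 0;
}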
smb2pdu.h
     420  bool is_smb2_neg_cmd(struct ksmbd_work *work);
     421  bool is_smb2_rsp(struct ksmbd_work *work);
     423  u16 get_smb2_cmd_val(struct ksmbd_work *work);
     424  void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err);
     425  int init_smb2_rsp_hdr(struct ksmbd_work *work);
     426  int smb2_allocate_rsp_buf(struct ksmbd_work *work);
     427  bool is_chained_smb2_message(struct ksmbd_work *work);
     428  int init_smb2_neg_rsp(struct ksmbd_work *work);
     429  void smb2_set_err_rsp(struct ksmbd_work *work);
     430  int smb2_check_user_session(struct ksmbd_work *work);
    [all...]
smb2pdu.c
      42  static void __wbuf(struct ksmbd_work *work, void **req, void **rsp)
      44  if (work->next_smb2_rcv_hdr_off) {  (in __wbuf)
      45  *req = ksmbd_req_buf_next(work);  (in __wbuf)
      46  *rsp = ksmbd_resp_buf_next(work);  (in __wbuf)
      48  *req = smb2_get_msg(work->request_buf);  (in __wbuf)
      49  *rsp = smb2_get_msg(work->response_buf);  (in __wbuf)
      83  * @work: smb work  (kernel-doc)
      88  int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
      90  struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);  (local in smb2_get_ksmbd_tcon)
     138  smb2_set_err_rsp(struct ksmbd_work *work)
     169  is_smb2_neg_cmd(struct ksmbd_work *work)
     193  is_smb2_rsp(struct ksmbd_work *work)
     214  get_smb2_cmd_val(struct ksmbd_work *work)
     230  set_smb2_rsp_status(struct ksmbd_work *work, __le32 err)
     250  init_smb2_neg_rsp(struct ksmbd_work *work)
     311  smb2_set_rsp_credits(struct ksmbd_work *work)
     373  init_chained_smb2_rsp(struct ksmbd_work *work)
     443  is_chained_smb2_message(struct ksmbd_work *work)
     494  init_smb2_rsp_hdr(struct ksmbd_work *work)
     524  smb2_allocate_rsp_buf(struct ksmbd_work *work)
     564  smb2_check_user_session(struct ksmbd_work *work)
     658  setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
     687  release_async_work(struct ksmbd_work *work)
     705  smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
    1095  smb2_handle_negotiate(struct ksmbd_work *work)
    1288  generate_preauth_hash(struct ksmbd_work *work)
    1336  ntlm_negotiate(struct ksmbd_work *work, struct negotiate_message *negblob, size_t negblob_len, struct smb2_sess_setup_rsp *rsp)
    1447  ntlm_authenticate(struct ksmbd_work *work, struct smb2_sess_setup_req *req, struct smb2_sess_setup_rsp *rsp)
    1586  krb5_authenticate(struct ksmbd_work *work, struct smb2_sess_setup_req *req, struct smb2_sess_setup_rsp *rsp)
    1666  krb5_authenticate(struct ksmbd_work *work, struct smb2_sess_setup_req *req, struct smb2_sess_setup_rsp *rsp)
    1674  smb2_sess_setup(struct ksmbd_work *work)
    1941  smb2_tree_connect(struct ksmbd_work *work)
    2126  smb2_tree_disconnect(struct ksmbd_work *work)
    2190  smb2_session_logoff(struct ksmbd_work *work)
    2252  create_smb2_pipe(struct ksmbd_work *work)
    2559  smb2_creat(struct ksmbd_work *work, struct path *parent_path, struct path *path, char *name, int open_flags, umode_t posix_mode, bool is_dir)
    2597  smb2_create_sd_buffer(struct ksmbd_work *work, struct smb2_create_req *req, const struct path *path)
    2651  smb2_open(struct ksmbd_work *work)
    3775  struct ksmbd_work *work;  (member)
    4003  smb2_resp_buf_len(struct ksmbd_work *work, unsigned short hdr2_len)
    4012  smb2_calc_max_out_buf_len(struct ksmbd_work *work, unsigned short hdr2_len, unsigned int out_buf_len)
    4028  smb2_query_dir(struct ksmbd_work *work)
    4313  smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp, struct smb2_query_info_req *req, struct smb2_query_info_rsp *rsp, void *rsp_org)
    4541  get_file_all_info(struct ksmbd_work *work, struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org)
    4605  get_file_alternate_info(struct ksmbd_work *work, struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org)
    4626  get_file_stream_info(struct ksmbd_work *work, struct smb2_query_info_rsp *rsp, struct ksmbd_file *fp, void *rsp_org)
    4887  smb2_get_info_file(struct ksmbd_work *work, struct smb2_query_info_req *req, struct smb2_query_info_rsp *rsp)
    5003  smb2_get_info_filesystem(struct ksmbd_work *work, struct smb2_query_info_req *req, struct smb2_query_info_rsp *rsp)
    5216  smb2_get_info_sec(struct ksmbd_work *work, struct smb2_query_info_req *req, struct smb2_query_info_rsp *rsp)
    5298  smb2_query_info(struct ksmbd_work *work)
    5361  smb2_close_pipe(struct ksmbd_work *work)
    5393  smb2_close(struct ksmbd_work *work)
    5508  smb2_echo(struct ksmbd_work *work)
    5520  smb2_rename(struct ksmbd_work *work, struct ksmbd_file *fp, struct smb2_file_rename_info *file_info, struct nls_table *local_nls)
    5591  smb2_create_link(struct ksmbd_work *work, struct ksmbd_share_config *share, struct smb2_file_link_info *file_info, unsigned int buf_len, struct file *filp, struct nls_table *local_nls)
    5750  set_file_allocation_info(struct ksmbd_work *work, struct ksmbd_file *fp, struct smb2_file_alloc_info *file_alloc_info)
    5800  set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp, struct smb2_file_eof_info *file_eof_info)
    5833  set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp, struct smb2_file_rename_info *rename_info, unsigned int buf_len)
    5928  smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, struct smb2_set_info_req *req, struct ksmbd_share_config *share)
    6036  smb2_set_info(struct ksmbd_work *work)
    6142  smb2_read_pipe(struct ksmbd_work *work)
    6203  smb2_set_remote_key_for_rdma(struct ksmbd_work *work, struct smb2_buffer_desc_v1 *desc, __le32 Channel, __le16 ChannelInfoLength)
    6233  smb2_read_rdma_channel(struct ksmbd_work *work, struct smb2_read_req *req, void *data_buf, size_t length)
    6255  smb2_read(struct ksmbd_work *work)
    6425  smb2_write_pipe(struct ksmbd_work *work)
    6486  smb2_write_rdma_channel(struct ksmbd_work *work, struct smb2_write_req *req, struct ksmbd_file *fp, loff_t offset, size_t length, bool sync)
    6522  smb2_write(struct ksmbd_work *work)
    6665  smb2_flush(struct ksmbd_work *work)
    6695  smb2_cancel(struct ksmbd_work *work)
    6863  smb2_lock(struct ksmbd_work *work)
    7220  fsctl_copychunk(struct ksmbd_work *work, struct copychunk_ioctl_req *ci_req, unsigned int cnt_code, unsigned int input_count, unsigned long long volatile_id, unsigned long long persistent_id, struct smb2_ioctl_rsp *rsp)
    7500  fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id, struct file_allocated_range_buffer *qar_req, struct file_allocated_range_buffer *qar_rsp, unsigned int in_count, unsigned int *out_count)
    7532  fsctl_pipe_transceive(struct ksmbd_work *work, u64 id, unsigned int out_buf_len, struct smb2_ioctl_req *req, struct smb2_ioctl_rsp *rsp)
    7577  fsctl_set_sparse(struct ksmbd_work *work, u64 id, struct file_sparse *sparse)
    7619  fsctl_request_resume_key(struct ksmbd_work *work, struct smb2_ioctl_req *req, struct resume_key_ioctl_rsp *key_rsp)
    7643  smb2_ioctl(struct ksmbd_work *work)
    7989  smb20_oplock_break_ack(struct ksmbd_work *work)
    8135  smb21_lease_break_ack(struct ksmbd_work *work)
    8272  smb2_oplock_break(struct ksmbd_work *work)
    8302  smb2_notify(struct ksmbd_work *work)
    8327  smb2_is_sign_req(struct ksmbd_work *work, unsigned int command)
    8346  smb2_check_sign_req(struct ksmbd_work *work)
    8389  smb2_set_sign_rsp(struct ksmbd_work *work)
    8418  smb3_check_sign_req(struct ksmbd_work *work)
    8477  smb3_set_sign_rsp(struct ksmbd_work *work)
    8523  smb3_preauth_hash_rsp(struct ksmbd_work *work)
    8579  smb3_encrypt_resp(struct ksmbd_work *work)
    8606  smb3_decrypt_req(struct ksmbd_work *work)
    8649  smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
    [all...]
ksmbd_work.h
      86  struct work_struct work;  (member)
      97  * @work: smb work containing response buffer  (kernel-doc)
      99  static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
     101  return work->response_buf + work->next_smb2_rsp_hdr_off + 4;  (in ksmbd_resp_buf_next)
     106  * @work: smb work containing response buffer  (kernel-doc)
     108  static inline void *ksmbd_resp_buf_curr(struct ksmbd_work *work)
     110  return work…  (in ksmbd_resp_buf_curr)
     117  ksmbd_req_buf_next(struct ksmbd_work *work)
    [all...]
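These helpers, together with __wbuf() in smb2pdu.c above, walk an SMB2 compound message: each chained PDU sits at a running offset into the shared buffer, and the +4 appears to skip the 4-byte transport length field that prefixes the message (compare smb2_get_msg() on the uncompounded path). A self-contained restatement of that arithmetic; the struct here only mirrors the ksmbd_work fields used and is illustrative:

/* Illustrative mirror of the ksmbd_work buffer fields used above. */
struct demo_work {
    void *request_buf;                  /* raw PDU incl. 4-byte transport length */
    void *response_buf;
    unsigned int next_smb2_rcv_hdr_off; /* offset of next chained request hdr */
    unsigned int next_smb2_rsp_hdr_off; /* offset of next chained response hdr */
};

/* Next chained response header: base + running offset + 4-byte length field. */
static inline void *demo_resp_buf_next(struct demo_work *work)
{
    return work->response_buf + work->next_smb2_rsp_hdr_off + 4;
}

/* Next chained request header, same arithmetic on the receive side. */
static inline void *demo_req_buf_next(struct demo_work *work)
{
    return work->request_buf + work->next_smb2_rcv_hdr_off + 4;
}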
/kernel/linux/linux-6.6/virt/kvm/ |
async_pf.c
      45  static void async_pf_execute(struct work_struct *work)
      48  container_of(work, struct kvm_async_pf, work);  (in async_pf_execute)
      59  * This work is run asynchronously to the task which owns  (comment in async_pf_execute)
      92  static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
      95  * The async #PF is "done", but KVM must wait for the work item itself,  (comment)
      98  * after the last call to module_put(). Note, flushing the work item  (comment)
     104  * need to be flushed (but sanity check that the work wasn't queued).  (comment)
     106  if (work->wakeup_all)  (in kvm_flush_and_free_async_pf_work)
     107  WARN_ON_ONCE(work…  (in kvm_flush_and_free_async_pf_work)
     119  struct kvm_async_pf *work =  (local in kvm_clear_async_pf_completion_queue)
     144  struct kvm_async_pf *work =  (local in kvm_clear_async_pf_completion_queue)
     160  struct kvm_async_pf *work;  (local in kvm_check_async_pf_completion)
     187  struct kvm_async_pf *work;  (local in kvm_setup_async_pf)
     225  struct kvm_async_pf *work;  (local in kvm_async_pf_wakeup_all)
    [all...]
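kvm_flush_and_free_async_pf_work() above encodes a general workqueue lifecycle rule: an item freed by its owner must be flushed first, so the handler (and the workqueue core) cannot touch the memory after it is released. A generic sketch of that rule, not KVM code; the struct and payload are illustrative:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_pf {
    struct work_struct work;
    /* ... payload the handler reads ... */
};

static void demo_pf_execute(struct work_struct *work)
{
    struct demo_pf *pf = container_of(work, struct demo_pf, work);

    /* Slow processing happens here, possibly long after queueing. */
    (void)pf;
}

static void demo_flush_and_free(struct demo_pf *pf)
{
    /*
     * Wait for the handler to finish and the workqueue code to stop
     * touching pf->work; only then is it safe to free the container.
     */
    flush_work(&pf->work);
    kfree(pf);
}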
/kernel/linux/linux-5.10/virt/kvm/ |
async_pf.c
      45  static void async_pf_execute(struct work_struct *work)
      48  container_of(work, struct kvm_async_pf, work);  (in async_pf_execute)
      59  * This work is run asynchronously to the task which owns  (comment in async_pf_execute)
      98  /* cancel outstanding work queue item */  (in kvm_clear_async_pf_completion_queue)
     100  struct kvm_async_pf *work =  (local in kvm_clear_async_pf_completion_queue)
     102  typeof(*work), queue);  (in kvm_clear_async_pf_completion_queue)
     103  list_del(&work->queue);  (in kvm_clear_async_pf_completion_queue)
     109  if (!work->vcpu)  (in kvm_clear_async_pf_completion_queue)
     114  flush_work(&work…  (in kvm_clear_async_pf_completion_queue)
     126  struct kvm_async_pf *work =  (local in kvm_clear_async_pf_completion_queue)
     139  struct kvm_async_pf *work;  (local in kvm_check_async_pf_completion)
     166  struct kvm_async_pf *work;  (local in kvm_setup_async_pf)
     205  struct kvm_async_pf *work;  (local in kvm_async_pf_wakeup_all)
    [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/ |
drm_flip_work.c
      31  * drm_flip_work_allocate_task - allocate a flip-work task  (kernel-doc)
      51  * @work: the flip-work  (kernel-doc)
      55  * func) on a work queue after drm_flip_work_commit() is called.  (kernel-doc)
      57  void drm_flip_work_queue_task(struct drm_flip_work *work,
      62  spin_lock_irqsave(&work->lock, flags);  (in drm_flip_work_queue_task)
      63  list_add_tail(&task->node, &work->queued);  (in drm_flip_work_queue_task)
      64  spin_unlock_irqrestore(&work->lock, flags);  (in drm_flip_work_queue_task)
      69  * drm_flip_work_queue - queue work  (kernel-doc)
      70  * @work…  (kernel-doc)
      76  drm_flip_work_queue(struct drm_flip_work *work, void *val)
     101  drm_flip_work_commit(struct drm_flip_work *work, struct workqueue_struct *wq)
     116  struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);  (local in flip_worker)
     147  drm_flip_work_init(struct drm_flip_work *work, const char *name, drm_flip_func_t func)
     166  drm_flip_work_cleanup(struct drm_flip_work *work)
    [all...]
drm_vblank_work.c
      38  * generic delayed work implementation which delays work execution until a  (comment)
      39  * particular vblank has passed, and then executes the work at realtime  (comment)
      43  * re-arming work items can be easily implemented.  (comment)
      48  struct drm_vblank_work *work, *next;  (local in drm_handle_vblank_works)
      54  list_for_each_entry_safe(work, next, &vblank->pending_work, node) {  (in drm_handle_vblank_works)
      55  if (!drm_vblank_passed(count, work->count))  (in drm_handle_vblank_works)
      58  list_del_init(&work->node);  (in drm_handle_vblank_works)
      60  kthread_queue_work(vblank->worker, &work->base);  (in drm_handle_vblank_works)
      67  /* Handle cancelling any pending vblank work item…  (comment)
      72  struct drm_vblank_work *work, *next;  (local in drm_vblank_cancel_pending_works)
     106  drm_vblank_work_schedule(struct drm_vblank_work *work, u64 count, bool nextonmiss)
     182  drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
     218  drm_vblank_work_flush(struct drm_vblank_work *work)
     240  drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc, void (*func)(struct kthread_work *work))
    [all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/ |
drm_flip_work.c
      31  * drm_flip_work_allocate_task - allocate a flip-work task  (kernel-doc)
      51  * @work: the flip-work  (kernel-doc)
      55  * func) on a work queue after drm_flip_work_commit() is called.  (kernel-doc)
      57  void drm_flip_work_queue_task(struct drm_flip_work *work,
      62  spin_lock_irqsave(&work->lock, flags);  (in drm_flip_work_queue_task)
      63  list_add_tail(&task->node, &work->queued);  (in drm_flip_work_queue_task)
      64  spin_unlock_irqrestore(&work->lock, flags);  (in drm_flip_work_queue_task)
      69  * drm_flip_work_queue - queue work  (kernel-doc)
      70  * @work…  (kernel-doc)
      76  drm_flip_work_queue(struct drm_flip_work *work, void *val)
     101  drm_flip_work_commit(struct drm_flip_work *work, struct workqueue_struct *wq)
     116  struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);  (local in flip_worker)
     147  drm_flip_work_init(struct drm_flip_work *work, const char *name, drm_flip_func_t func)
     166  drm_flip_work_cleanup(struct drm_flip_work *work)
    [all...]
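drm_flip_work exists so flip-completion code, which may run in atomic context, can hand values off for cleanup in process context. A hedged usage sketch against the init/queue/commit functions indexed above; the framebuffer-unref callback mirrors a common driver use, and the "demo" names are illustrative:

#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
#include <linux/workqueue.h>

static struct drm_flip_work unref_work;

/* Runs later, in process context, once committed to a workqueue. */
static void demo_unref_fb(struct drm_flip_work *work, void *val)
{
    drm_framebuffer_put(val);
}

static void demo_init(void)
{
    drm_flip_work_init(&unref_work, "fb unref", demo_unref_fb);
}

/* Called from flip-completion (possibly atomic) context. */
static void demo_flip_done(struct drm_framebuffer *old_fb)
{
    drm_flip_work_queue(&unref_work, old_fb);
    drm_flip_work_commit(&unref_work, system_unbound_wq);
}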
drm_vblank_work.c
      38  * generic delayed work implementation which delays work execution until a  (comment)
      39  * particular vblank has passed, and then executes the work at realtime  (comment)
      43  * re-arming work items can be easily implemented.  (comment)
      48  struct drm_vblank_work *work, *next;  (local in drm_handle_vblank_works)
      54  list_for_each_entry_safe(work, next, &vblank->pending_work, node) {  (in drm_handle_vblank_works)
      55  if (!drm_vblank_passed(count, work->count))  (in drm_handle_vblank_works)
      58  list_del_init(&work->node);  (in drm_handle_vblank_works)
      60  kthread_queue_work(vblank->worker, &work->base);  (in drm_handle_vblank_works)
      67  /* Handle cancelling any pending vblank work item…  (comment)
      72  struct drm_vblank_work *work, *next;  (local in drm_vblank_cancel_pending_works)
     106  drm_vblank_work_schedule(struct drm_vblank_work *work, u64 count, bool nextonmiss)
     182  drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
     218  drm_vblank_work_flush(struct drm_vblank_work *work)
     240  drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc, void (*func)(struct kthread_work *work))
    [all...]
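drm_vblank_work delays a kthread_work until a target vblank count passes on a CRTC; line 60 above shows the work being pushed to the CRTC's vblank worker once the count is reached. A hedged sketch against the init/schedule functions indexed above (the surrounding state struct is hypothetical, and scheduling assumes vblank interrupts can be enabled on the CRTC):

#include <drm/drm_vblank_work.h>
#include <drm/drm_vblank.h>
#include <drm/drm_crtc.h>
#include <linux/kthread.h>

struct demo_state {
    struct drm_vblank_work vbl_work;
};

static void demo_vbl_func(struct kthread_work *base)
{
    struct drm_vblank_work *work =
        container_of(base, struct drm_vblank_work, base);

    /* Runs on the CRTC's vblank worker once the target count passed. */
    (void)work;
}

static int demo_arm(struct demo_state *st, struct drm_crtc *crtc)
{
    drm_vblank_work_init(&st->vbl_work, crtc, demo_vbl_func);

    /* Aim two vblanks out; run on the next vblank if we miss. */
    return drm_vblank_work_schedule(&st->vbl_work,
                                    drm_crtc_vblank_count(crtc) + 2,
                                    true);
}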
/kernel/linux/linux-5.10/include/trace/events/ |
workqueue.h
      14  * workqueue_queue_work - called when a work gets queued  (kernel-doc)
      17  * @work: pointer to struct work_struct  (kernel-doc)
      19  * This event occurs when a work is queued immediately or once a  (kernel-doc)
      20  * delayed work is actually queued on a workqueue (ie: once the delay…  (kernel-doc)
      26  struct work_struct *work),  (TP_PROTO)
      28  TP_ARGS(req_cpu, pwq, work),
      31  __field( void *, work )  (TP_STRUCT__entry)
      39  __entry->work = work;  (TP_fast_assign)
      40  __entry->function = work…  (TP_fast_assign)
    [all...]
/kernel/linux/linux-6.6/include/trace/events/ |
workqueue.h
      14  * workqueue_queue_work - called when a work gets queued  (kernel-doc)
      17  * @work: pointer to struct work_struct  (kernel-doc)
      19  * This event occurs when a work is queued immediately or once a  (kernel-doc)
      20  * delayed work is actually queued on a workqueue (ie: once the delay…  (kernel-doc)
      26  struct work_struct *work),  (TP_PROTO)
      28  TP_ARGS(req_cpu, pwq, work),
      31  __field( void *, work )  (TP_STRUCT__entry)
      39  __entry->work = work;  (TP_fast_assign)
      40  __entry->function = work…  (TP_fast_assign)
    [all...]
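The event above records the work pointer and its callback at queue time. A minimal TRACE_EVENT following the same TP_PROTO/TP_STRUCT__entry/TP_fast_assign/TP_printk pattern; the "demo" trace system and field set are illustrative, and the header is assumed to be reachable as trace/events/demo.h (or via TRACE_INCLUDE_PATH):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>
#include <linux/workqueue.h>

TRACE_EVENT(demo_queue_work,

    TP_PROTO(int req_cpu, struct work_struct *work),

    TP_ARGS(req_cpu, work),

    TP_STRUCT__entry(
        __field(void *, work)
        __field(void *, function)
        __field(int,    req_cpu)
    ),

    TP_fast_assign(
        __entry->work     = work;
        __entry->function = work->func;
        __entry->req_cpu  = req_cpu;
    ),

    TP_printk("work struct=%p function=%ps req_cpu=%d",
              __entry->work, __entry->function, __entry->req_cpu)
);

#endif /* _TRACE_DEMO_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>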
/kernel/linux/linux-6.6/kernel/ |
task_work.c
       9  * task_work_add - ask the @task to execute @work->func()  (kernel-doc)
      11  * @work: the callback to run  (kernel-doc)
      14  * Queue @work for task_work_run() below and notify the @task if @notify…  (kernel-doc)
      25  * @TWA_RESUME work is run only when the task exits the kernel and returns to  (kernel-doc)
      28  * Fails if the @task is exiting/exited and thus it can't process this @work.  (kernel-doc)
      29  * Otherwise @work->func() will be called when the @task goes through one of  (kernel-doc)
      32  * If the targeted task is exiting, then an error is returned and the work item  (kernel-doc)
      42  int task_work_add(struct task_struct *task, struct callback_head *work,
      47  /* record the work call stack in order to print it in KASAN reports */  (in task_work_add)
      48  kasan_record_aux_stack(work);  (in task_work_add)
      92  struct callback_head *work;  (local in task_work_cancel_match)
     150  struct callback_head *work, *head, *next;  (local in task_work_run)
    [all...]
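task_work queues a callback_head onto a task so the callback runs when that task next crosses a safe point; with TWA_RESUME, that is on return to user mode, as the kernel-doc above says. A hedged sketch targeting current, the common safe case (the callback body is illustrative):

#include <linux/task_work.h>
#include <linux/sched.h>
#include <linux/printk.h>

static void demo_twork_func(struct callback_head *head)
{
    pr_info("ran in task %s on return to user mode\n", current->comm);
}

static struct callback_head demo_twork;

static int demo_queue_task_work(void)
{
    init_task_work(&demo_twork, demo_twork_func);

    /* Fails (e.g. -ESRCH) if the target task is already exiting. */
    return task_work_add(current, &demo_twork, TWA_RESUME);
}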
irq_work.c
      57  static bool irq_work_claim(struct irq_work *work)
      61  oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->node.a_flags);  (in irq_work_claim)
      63  * If the work is already pending, no need to raise the IPI.  (comment in irq_work_claim)
      79  static __always_inline void irq_work_raise(struct irq_work *work)
      82  trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);  (in irq_work_raise)
      87  /* Enqueue on current CPU, work must already be claimed and preempt disabled */
      88  static void __irq_work_queue_local(struct irq_work *work)
      95  work_flags = atomic_read(&work->node.a_flags);  (in __irq_work_queue_local)
     107  if (!llist_add(&work->node.llist, list))  (in __irq_work_queue_local)
     110  /* If the work i…  (comment in __irq_work_queue_local)
     116  irq_work_queue(struct irq_work *work)
     137  irq_work_queue_on(struct irq_work *work, int cpu)
     203  struct irq_work *work = arg;  (local in irq_work_single)
     237  struct irq_work *work, *tmp;  (local in irq_work_run_list)
     286  irq_work_sync(struct irq_work *work)
    [all...]
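irq_work lets code running in hard-IRQ or NMI context defer a callback that runs shortly afterwards from the irq_work interrupt, on the same CPU (irq_work_queue) or a remote one (irq_work_queue_on). A small usage sketch against the public API; the handler body is illustrative:

#include <linux/irq_work.h>
#include <linux/smp.h>
#include <linux/printk.h>

static void demo_irq_work_func(struct irq_work *work)
{
    /* Runs in hard-IRQ context shortly after being raised. */
    pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work demo_iwork;

static void demo_setup(void)
{
    init_irq_work(&demo_iwork, demo_irq_work_func);
}

/* Safe to call where schedule_work() is not (e.g. NMI, hard IRQ). */
static void demo_raise(void)
{
    irq_work_queue(&demo_iwork);
}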
kthread.c
     307  * functions which do some additional work in non-modular code such as  (comment)
     777  * when they finish. There is defined a safe point for freezing when one work  (comment)
     786  struct kthread_work *work;  (local in kthread_worker_fn)
     809  work = NULL;  (in kthread_worker_fn)
     812  work = list_first_entry(&worker->work_list,  (in kthread_worker_fn)
     814  list_del_init(&work->node);  (in kthread_worker_fn)
     816  worker->current_work = work;  (in kthread_worker_fn)
     819  if (work) {  (in kthread_worker_fn)
     820  kthread_work_func_t func = work->func;  (in kthread_worker_fn)
     822  trace_sched_kthread_work_execute_start(work);  (in kthread_worker_fn)
     951  queuing_blocked(struct kthread_worker *worker, struct kthread_work *work)
     959  kthread_insert_work_sanity_check(struct kthread_worker *worker, struct kthread_work *work)
     969  kthread_insert_work(struct kthread_worker *worker, struct kthread_work *work, struct list_head *pos)
     995  kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
    1022  struct kthread_work *work = &dwork->work;  (local in kthread_delayed_work_timer_fn)
    1052  struct kthread_work *work = &dwork->work;  (local in __kthread_queue_delayed_work)
    1095  struct kthread_work *work = &dwork->work;  (local in kthread_queue_delayed_work)
    1112  struct kthread_work work;  (member)
    1116  kthread_flush_work_fn(struct kthread_work *work)
    1129  kthread_flush_work(struct kthread_work *work)
    1168  kthread_cancel_delayed_work_timer(struct kthread_work *work, unsigned long *flags)
    1201  __kthread_cancel_work(struct kthread_work *work)
    1242  struct kthread_work *work = &dwork->work;  (local in kthread_mod_delayed_work)
    1285  __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
    1338  kthread_cancel_work_sync(struct kthread_work *work)
    [all...]
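The kthread_worker functions indexed above form a small API: create a dedicated worker thread, queue kthread_work items to it, flush them, and tear the worker down. A hedged usage sketch; all "demo" names are illustrative:

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/printk.h>

static struct kthread_worker *demo_worker;
static struct kthread_work demo_kwork;

static void demo_kwork_func(struct kthread_work *work)
{
    pr_info("running on a dedicated kthread\n");
}

static int demo_start(void)
{
    demo_worker = kthread_create_worker(0, "demo-worker");
    if (IS_ERR(demo_worker))
        return PTR_ERR(demo_worker);

    kthread_init_work(&demo_kwork, demo_kwork_func);
    kthread_queue_work(demo_worker, &demo_kwork);
    kthread_flush_work(&demo_kwork);    /* wait for it to complete */
    return 0;
}

static void demo_stop(void)
{
    kthread_destroy_worker(demo_worker);
}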
/kernel/linux/linux-5.10/kernel/ |
task_work.c
       9  * task_work_add - ask the @task to execute @work->func()  (kernel-doc)
      11  * @work: the callback to run  (kernel-doc)
      14  * Queue @work for task_work_run() below and notify the @task if @notify…  (kernel-doc)
      17  * work is run only when the task exits the kernel and returns to user mode,  (kernel-doc)
      19  * it can't process this @work. Otherwise @work->func() will be called when the  (kernel-doc)
      22  * If the targeted task is exiting, then an error is returned and the work item  (kernel-doc)
      32  int task_work_add(struct task_struct *task, struct callback_head *work,
      41  work->next = head;  (in task_work_add)
      42  } while (cmpxchg(&task->task_works, head, work) !…  (in task_work_add)
      75  struct callback_head *work;  (local in task_work_cancel_match)
     131  struct callback_head *work, *head, *next;  (local in task_work_run)
    [all...]
irq_work.c
      30  static bool irq_work_claim(struct irq_work *work)
      34  oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);  (in irq_work_claim)
      36  * If the work is already pending, no need to raise the IPI.  (comment in irq_work_claim)
      52  /* Enqueue on current CPU, work must already be claimed and preempt disabled */
      53  static void __irq_work_queue_local(struct irq_work *work)
      55  /* If the work is "lazy", handle it from next tick if any */  (in __irq_work_queue_local)
      56  if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {  (in __irq_work_queue_local)
      57  if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&  (in __irq_work_queue_local)
      61  if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))  (in __irq_work_queue_local)
      66  /* Enqueue the irq work…  (comment)
      67  irq_work_queue(struct irq_work *work)
      88  irq_work_queue_on(struct irq_work *work, int cpu)
     135  struct irq_work *work = arg;  (local in irq_work_single)
     160  struct irq_work *work, *tmp;  (local in irq_work_run_list)
     197  irq_work_sync(struct irq_work *work)
    [all...]
kthread.c
     710  * when they finish. There is defined a safe point for freezing when one work  (comment)
     719  struct kthread_work *work;  (local in kthread_worker_fn)
     742  work = NULL;  (in kthread_worker_fn)
     745  work = list_first_entry(&worker->work_list,  (in kthread_worker_fn)
     747  list_del_init(&work->node);  (in kthread_worker_fn)
     749  worker->current_work = work;  (in kthread_worker_fn)
     752  if (work) {  (in kthread_worker_fn)
     754  work->func(work);  (in kthread_worker_fn)
     855  * Returns true when the work coul…  (kernel-doc)
     859  queuing_blocked(struct kthread_worker *worker, struct kthread_work *work)
     867  kthread_insert_work_sanity_check(struct kthread_worker *worker, struct kthread_work *work)
     877  kthread_insert_work(struct kthread_worker *worker, struct kthread_work *work, struct list_head *pos)
     901  kthread_queue_work(struct kthread_worker *worker, struct kthread_work *work)
     928  struct kthread_work *work = &dwork->work;  (local in kthread_delayed_work_timer_fn)
     958  struct kthread_work *work = &dwork->work;  (local in __kthread_queue_delayed_work)
    1002  struct kthread_work *work = &dwork->work;  (local in kthread_queue_delayed_work)
    1019  struct kthread_work work;  (member)
    1023  kthread_flush_work_fn(struct kthread_work *work)
    1036  kthread_flush_work(struct kthread_work *work)
    1075  kthread_cancel_delayed_work_timer(struct kthread_work *work, unsigned long *flags)
    1108  __kthread_cancel_work(struct kthread_work *work)
    1149  struct kthread_work *work = &dwork->work;  (local in kthread_mod_delayed_work)
    1192  __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
    1245  kthread_cancel_work_sync(struct kthread_work *work)
    [all...]
/kernel/linux/linux-6.6/fs/btrfs/ |
async-thread.c
      29  /* List head pointing to ordered work list */  (comment)
      55  struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
      57  return work->wq->fs_info;  (in btrfs_work_owner)
     163  * Hook for threshold which will be called before executing the work,  (comment)
     214  struct btrfs_work *work;  (local in run_ordered_work)
     223  work = list_entry(list->next, struct btrfs_work,  (in run_ordered_work)
     225  if (!test_bit(WORK_DONE_BIT, &work->flags))  (in run_ordered_work)
     231  * updates from ordinary work function.  (comment in run_ordered_work)
     237  * we leave the work item on the list as a barrier so  (comment in run_ordered_work)
     238  * that later work item…  (comment in run_ordered_work)
     296  struct btrfs_work *work = container_of(normal_work, struct btrfs_work,  (local in btrfs_work_helper)
     331  btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, btrfs_func_t ordered_func, btrfs_func_t ordered_free)
     342  btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
    [all...]
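btrfs's workqueue wrapper runs ->func items concurrently but invokes the ordered callback in submission order, which is how work such as checksumming can parallelise while completions stay ordered behind the barrier logic in run_ordered_work(). A hedged usage sketch against the 4-argument btrfs_init_work()/btrfs_queue_work() signatures shown at lines 331 and 342 of this tree; the callbacks and containing struct are illustrative, and the btrfs-internal header is assumed available:

#include <linux/slab.h>
#include "async-thread.h"   /* btrfs-internal; defines btrfs_work, btrfs_workqueue */

struct demo_csum {
    struct btrfs_work work;
    /* ... per-bio checksum state ... */
};

static void demo_do_csum(struct btrfs_work *work)
{
    /* Heavy work; may run in parallel with other queued items. */
}

static void demo_finish_ordered(struct btrfs_work *work)
{
    /* Completion side; invoked in the order items were queued. */
}

static void demo_free(struct btrfs_work *work)
{
    kfree(container_of(work, struct demo_csum, work));
}

static void demo_submit(struct btrfs_workqueue *wq, struct demo_csum *c)
{
    btrfs_init_work(&c->work, demo_do_csum, demo_finish_ordered, demo_free);
    btrfs_queue_work(wq, &c->work);
}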
/kernel/linux/linux-5.10/include/linux/ |
completion.h
      36  #define COMPLETION_INITIALIZER(work) \
      37  { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
      39  #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
      40  (*({ init_completion_map(&(work), &(map)); &(work); }))
      42  #define COMPLETION_INITIALIZER_ONSTACK(work) \
      43  (*({ init_completion(&work); &work; }))
      47  * @work: identifier for the completion structure  (kernel-doc)
      53  #define DECLARE_COMPLETION(work) \
    [all...]
/kernel/linux/linux-6.6/include/linux/ |
completion.h
      35  #define COMPLETION_INITIALIZER(work) \
      36  { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
      38  #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
      39  (*({ init_completion_map(&(work), &(map)); &(work); }))
      41  #define COMPLETION_INITIALIZER_ONSTACK(work) \
      42  (*({ init_completion(&work); &work; }))
      46  * @work: identifier for the completion structure  (kernel-doc)
      52  #define DECLARE_COMPLETION(work) \
    [all...]
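These macros statically initialise a struct completion; the typical pairing is one context blocking in wait_for_completion() while another signals with complete(). A small sketch (the producer/waiter split is illustrative, and reusing the completion would need reinit_completion()):

#include <linux/completion.h>
#include <linux/workqueue.h>

static DECLARE_COMPLETION(demo_done);

static void demo_producer(struct work_struct *unused)
{
    /* ... produce the result ... */
    complete(&demo_done);               /* wake one waiter */
}

static DECLARE_WORK(demo_work, demo_producer);

static void demo_wait_for_result(void)
{
    schedule_work(&demo_work);
    wait_for_completion(&demo_done);    /* sleep until complete() */
}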
/kernel/linux/linux-5.10/fs/btrfs/ |
async-thread.c
      30  /* List head pointing to ordered work list */  (comment)
      61  struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
      63  return work->wq->fs_info;  (in btrfs_work_owner)
     173  * Hook for threshold which will be called before executing the work,  (comment)
     224  struct btrfs_work *work;  (local in run_ordered_work)
     233  work = list_entry(list->next, struct btrfs_work,  (in run_ordered_work)
     235  if (!test_bit(WORK_DONE_BIT, &work->flags))  (in run_ordered_work)
     241  * updates from ordinary work function.  (comment in run_ordered_work)
     247  * we leave the work item on the list as a barrier so  (comment in run_ordered_work)
     248  * that later work item…  (comment in run_ordered_work)
     306  struct btrfs_work *work = container_of(normal_work, struct btrfs_work,  (local in btrfs_work_helper)
     342  btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, btrfs_func_t ordered_func, btrfs_func_t ordered_free)
     353  __btrfs_queue_work(struct __btrfs_workqueue *wq, struct btrfs_work *work)
     369  btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
     408  btrfs_set_work_high_priority(struct btrfs_work *work)
    [all...]
/kernel/linux/linux-5.10/drivers/staging/octeon/ |
ethernet-rx.c
      59  * @work: Work queue entry pointing to the packet.  (kernel-doc)
      63  static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
      68  port = work->word0.pip.cn68xx.pknd;  (in cvm_oct_check_rcv_error)
      70  port = work->word1.cn38xx.ipprt;  (in cvm_oct_check_rcv_error)
      72  if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))  (in cvm_oct_check_rcv_error)
      81  if (work->word2.snoip.err_code == 5 ||  (in cvm_oct_check_rcv_error)
      82  work->word2.snoip.err_code == 7) {  (in cvm_oct_check_rcv_error)
      99  cvmx_phys_to_ptr(work->packet_ptr.s.addr);  (in cvm_oct_check_rcv_error)
     102  while (i < work…  (in cvm_oct_check_rcv_error)
     142  copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
     222  struct cvmx_wqe *work;  (local in cvm_oct_poll)
    [all...]
/kernel/linux/linux-6.6/drivers/staging/octeon/ |
ethernet-rx.c
      59  * @work: Work queue entry pointing to the packet.  (kernel-doc)
      63  static inline int cvm_oct_check_rcv_error(struct cvmx_wqe *work)
      68  port = work->word0.pip.cn68xx.pknd;  (in cvm_oct_check_rcv_error)
      70  port = work->word1.cn38xx.ipprt;  (in cvm_oct_check_rcv_error)
      72  if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64))  (in cvm_oct_check_rcv_error)
      81  if (work->word2.snoip.err_code == 5 ||  (in cvm_oct_check_rcv_error)
      82  work->word2.snoip.err_code == 7) {  (in cvm_oct_check_rcv_error)
      99  cvmx_phys_to_ptr(work->packet_ptr.s.addr);  (in cvm_oct_check_rcv_error)
     102  while (i < work…  (in cvm_oct_check_rcv_error)
     142  copy_segments_to_skb(struct cvmx_wqe *work, struct sk_buff *skb)
     222  struct cvmx_wqe *work;  (local in cvm_oct_poll)
    [all...]
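cvm_oct_check_rcv_error() reads the hardware work-queue entry (cvmx_wqe) that Octeon's packet-input unit attaches to each received frame; per the snippets above, the port index lives in word0 on cn68xx-class chips and in word1 on older ones. A hedged sketch of that field selection; the helper names are illustrative, not driver functions, and the cvmx headers shipped with this platform tree are assumed available:

#include <asm/octeon/octeon.h>      /* octeon_has_feature(), assumed to pull in
                                       octeon-feature.h on this platform */
#include <asm/octeon/cvmx-wqe.h>    /* struct cvmx_wqe */

/* Illustrative helper mirroring the port selection at lines 68/70. */
static inline int demo_rcv_port(struct cvmx_wqe *work)
{
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
        return work->word0.pip.cn68xx.pknd; /* cn68xx: port kind */
    return work->word1.cn38xx.ipprt;        /* older chips: input port */
}

/* The driver special-cases err_code 10 on frames of at most 64 bytes. */
static inline bool demo_short_frame_err(struct cvmx_wqe *work)
{
    return work->word2.snoip.err_code == 10 && work->word1.len <= 64;
}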