Lines Matching defs:work

243 * struct binder_work - work enqueued on a worklist
245 * @type: type of work to be performed
247 * There are separate work lists for proc, thread, and node (async).
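Read together, the fragments at 243-264 describe the central type: a small embeddable item carrying a list link and a type tag. A minimal sketch of what this listing implies, using only enum values that appear in the matches below (the upstream definition carries further values, such as a transaction-complete type, which this search did not surface; the value order is an assumption):

    #include <linux/list.h>

    struct binder_work {
            struct list_head entry;   /* links the item into a proc, thread,
                                       * or node (async) worklist */
            enum {
                    BINDER_WORK_TRANSACTION = 1,
                    BINDER_WORK_RETURN_ERROR,
                    BINDER_WORK_NODE,
                    BINDER_WORK_DEAD_BINDER,
                    BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                    BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
            } type;                   /* type of work to be performed */
    };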
264 struct binder_work work;
273 * @work: worklist element for node work
323 * @async_todo: list of async work items
331 struct binder_work work;
368 * @work: worklist element for death notifications
372 struct binder_work work;
443 * @waiting_threads: threads currently waiting for proc work
454 * @deferred_work: bitmap of deferred work to perform
459 * @todo: list of work for this process
543 * @todo: list of work to do for this thread
545 * @process_todo: whether work in @todo should be processed
551 * @wait: wait queue for thread work
606 struct binder_work work;
809 * binder_worklist_empty() - Check if no items on the work list
827 * binder_enqueue_work_ilocked() - Add an item to the work list
828 * @work: struct binder_work to add to list
829 * @target_list: list to add work to
831 * Adds the work to the specified list. Asserts that work
837 binder_enqueue_work_ilocked(struct binder_work *work,
841 BUG_ON(work->entry.next && !list_empty(&work->entry));
842 list_add_tail(&work->entry, target_list);
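The matches at 827-842 give the basic enqueue almost whole. Reassembled as the fragments suggest (per the driver's naming convention, the _ilocked suffix means the caller already holds the proc's inner lock):

    static void
    binder_enqueue_work_ilocked(struct binder_work *work,
                                struct list_head *target_list)
    {
            /* a non-NULL next on a non-empty entry means the item is
             * already queued somewhere else: double-enqueue is a bug */
            BUG_ON(work->entry.next && !list_empty(&work->entry));
            list_add_tail(&work->entry, target_list);
    }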
846 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
847 * @thread: thread to queue work to
848 * @work: struct binder_work to add to list
850 * Adds the work to the todo list of the thread. Doesn't set the process_todo
852 * sleep without handling this work when it calls read.
858 struct binder_work *work)
861 binder_enqueue_work_ilocked(work, &thread->todo);
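Lines 846-861 are the deferred variant: it queues to thread->todo but leaves @process_todo (see 543/545 below) unset, so a thread entering read may still go to sleep instead of being forced to drain this item first. As the fragments suggest:

    static void
    binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
                                                struct binder_work *work)
    {
            /* no process_todo update: the thread may legitimately
             * sleep in read without handling this item */
            binder_enqueue_work_ilocked(work, &thread->todo);
    }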
865 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
866 * @thread: thread to queue work to
867 * @work: struct binder_work to add to list
869 * Adds the work to the todo list of the thread, and enables processing
876 struct binder_work *work)
879 binder_enqueue_work_ilocked(work, &thread->todo);
882 * queuing their own work; they rely on these events to consume
884 * indefinitely without handling the work.
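Lines 865-884 are the non-deferred counterpart; the comment fragments at 882-884 give the rationale: threads that queue work for themselves rely on these events, and without marking the queue as needing processing they could sleep indefinitely without consuming them. A sketch (the upstream body may carry extra special-casing this search did not surface):

    static void
    binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
                                       struct binder_work *work)
    {
            binder_enqueue_work_ilocked(work, &thread->todo);
            /* force the next read on this thread to drain @todo */
            thread->process_todo = true;
    }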
894 * binder_enqueue_thread_work() - Add an item to the thread work list
895 * @thread: thread to queue work to
896 * @work: struct binder_work to add to list
898 * Adds the work to the todo list of the thread, and enables processing
903 struct binder_work *work)
906 binder_enqueue_thread_work_ilocked(thread, work);
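Lines 894-906 are the unlocked wrapper, which by convention just brackets the _ilocked helper with the inner proc lock. A sketch, assuming the driver's binder_inner_proc_lock()/unlock() helpers:

    static void
    binder_enqueue_thread_work(struct binder_thread *thread,
                               struct binder_work *work)
    {
            binder_inner_proc_lock(thread->proc);
            binder_enqueue_thread_work_ilocked(thread, work);
            binder_inner_proc_unlock(thread->proc);
    }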
911 binder_dequeue_work_ilocked(struct binder_work *work)
913 list_del_init(&work->entry);
917 * binder_dequeue_work() - Removes an item from the work list
919 * @work: struct binder_work to remove from list
921 * Removes the specified work item from whatever list it is on.
922 * Can safely be called if work is not on any list.
925 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
928 binder_dequeue_work_ilocked(work);
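Lines 911-928 are the dequeue side. list_del_init() both unlinks the item and re-initializes entry to point at itself; that is what makes the guarantee at 922 ("can safely be called if work is not on any list") hold, and what lets list_empty(&work->entry) serve as a "not currently queued" test elsewhere in this listing. Reassembled:

    static void
    binder_dequeue_work_ilocked(struct binder_work *work)
    {
            /* self-links entry, so repeated calls are harmless no-ops */
            list_del_init(&work->entry);
    }

    static void
    binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
    {
            binder_inner_proc_lock(proc);
            binder_dequeue_work_ilocked(work);
            binder_inner_proc_unlock(proc);
    }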
1006 * binder_select_thread_ilocked() - selects a thread for doing proc work.
1014 * Return: If there's a thread currently waiting for process work,
1034 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1044 * Note that for this function to work as expected, callers
1046 * to handle the work (if they don't have a thread already),
1063 /* Didn't find a thread waiting for proc work; this can happen
1067 * the kernel driver soon and pick up this work.
1071 * over all threads not handling transaction work, and
1074 * work currently.
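Lines 1006-1074 cover picking and waking a thread for proc work. A sketch of the selection step, consistent with @waiting_threads at 443 (the per-thread link name waiting_thread_node is an assumption):

    static struct binder_thread *
    binder_select_thread_ilocked(struct binder_proc *proc)
    {
            struct binder_thread *thread;

            /* pop the first thread parked waiting for proc work, if any */
            thread = list_first_entry_or_null(&proc->waiting_threads,
                                              struct binder_thread,
                                              waiting_thread_node);
            if (thread)
                    list_del_init(&thread->waiting_thread_node);
            return thread;
    }

When this returns NULL, the comment at 1063-1067 applies: a thread may be on its way back to userspace and will pick the work up on its next call into the driver, so finding no waiter is not an error.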
1185 node->work.type = BINDER_WORK_NODE;
1190 INIT_LIST_HEAD(&node->work.entry);
1252 binder_dequeue_work_ilocked(&node->work);
1255 &node->work);
1260 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1269 binder_enqueue_work_ilocked(&node->work, target_list);
1311 if (list_empty(&node->work.entry)) {
1312 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1319 binder_dequeue_work_ilocked(&node->work);
1325 BUG_ON(!list_empty(&node->work.entry));
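Lines 1185-1325 show how a node reuses one embedded binder_work for its entire lifetime: the entry is initialized self-linked at creation, and list_empty() then serves as an "already queued?" test so the node is never on more than one list at a time. The idiom, pieced together from the fragments:

    /* at node creation (1185, 1190): */
    node->work.type = BINDER_WORK_NODE;
    INIT_LIST_HEAD(&node->work.entry);     /* self-linked: not queued */

    /* whenever the node needs attention (1311-1312): */
    if (list_empty(&node->work.entry))     /* enqueue at most once */
            binder_enqueue_work_ilocked(&node->work, &proc->todo);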
1374 * needed to serialize with the node work on the queue (which
1577 binder_dequeue_work(ref->proc, &ref->death->work);
1587 * @target_list: list to queue node work on
2012 &target_thread->reply_error.work);
2273 * @twork: callback_head for task work
2276 * Structure to pass task work to be handled after
2286 * @twork: callback head for task work
3082 * wake it up. If no thread is found, the work is queued to the proc
3121 binder_enqueue_thread_work_ilocked(thread, &t->work);
3123 binder_enqueue_work_ilocked(&t->work, &proc->todo);
3125 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
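Lines 3082-3125 are the delivery decision for a new transaction, matching the comment fragment at 3082: prefer a specific waiting thread, otherwise fall back to the proc-wide list, and park oneway work behind the node when an async transaction is already outstanding (@async_todo at 323). A sketch; the pending_async flag name is an assumption:

    t->work.type = BINDER_WORK_TRANSACTION;
    if (thread)
            /* a waiting thread was selected: hand it the work directly */
            binder_enqueue_thread_work_ilocked(thread, &t->work);
    else if (!pending_async)
            /* no specific thread: any looper in this proc may take it */
            binder_enqueue_work_ilocked(&t->work, &proc->todo);
    else
            /* async already in flight: serialize behind the node */
            binder_enqueue_work_ilocked(&t->work, &node->async_todo);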
3844 t->work.type = BINDER_WORK_TRANSACTION;
3858 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3979 binder_enqueue_thread_work(thread, &thread->return_error.work);
3983 binder_enqueue_thread_work(thread, &thread->return_error.work);
4320 &thread->return_error.work);
4363 INIT_LIST_HEAD(&death->work.entry);
4367 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4371 &ref->death->work, &proc->todo);
4395 if (list_empty(&death->work.entry)) {
4396 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4402 &death->work);
4405 &death->work,
4411 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4412 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4434 work);
4451 binder_dequeue_work_ilocked(&death->work);
4452 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4453 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4458 thread, &death->work);
4461 &death->work,
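Lines 4363-4461 trace the death-notification state machine entirely through work.type. Pieced together from the fragments, the transitions are:

    /* request a death notification (4363-4371): arm it */
    INIT_LIST_HEAD(&death->work.entry);
    ref->death->work.type = BINDER_WORK_DEAD_BINDER;

    /* clear while the target is still alive (4395-4405): just confirm */
    death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;

    /* clear racing with an already-queued death (4411-4412): deliver
     * the death and the clear confirmation together */
    BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
    death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;

    /* death acknowledged by userspace (4451-4458): if a clear was
     * pending, requeue the item as a plain clear confirmation */
    binder_dequeue_work_ilocked(&death->work);
    if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR)
            death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;

Which BC_* userspace command drives each branch is inferred from the fragments, not shown by this search.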
4649 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4705 t = container_of(w, struct binder_transaction, work);
4709 w, struct binder_error, work);
4736 struct binder_node *node = container_of(w, struct binder_node, work);
4829 death = container_of(w, struct binder_ref_death, work);
4865 pr_err("%d:%d: bad work type %d\n",
5054 t = container_of(w, struct binder_transaction, work);
5061 w, struct binder_error, work);
5077 death = container_of(w, struct binder_ref_death, work);
5087 pr_err("unexpected work type, %d, not freed\n",
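Lines 4705-5087 are the consumer side: a binder_work is always embedded in a larger object, and w->type tells the reader which container_of() projection to apply. The dispatch shape implied by the matches (variable declarations and error handling elided; the pr_err arguments are assumed):

    switch (w->type) {
    case BINDER_WORK_TRANSACTION:
            t = container_of(w, struct binder_transaction, work);
            break;
    case BINDER_WORK_RETURN_ERROR: {
            struct binder_error *e =
                    container_of(w, struct binder_error, work);
            /* report the error back to userspace */
            break;
    }
    case BINDER_WORK_NODE: {
            struct binder_node *node =
                    container_of(w, struct binder_node, work);
            break;
    }
    case BINDER_WORK_DEAD_BINDER:
            death = container_of(w, struct binder_ref_death, work);
            break;
    default:
            pr_err("%d:%d: bad work type %d\n",
                   proc->pid, thread->pid, w->type);
            break;
    }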
5125 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5127 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
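Lines 5125-5127 initialize the per-thread return_error and reply_error objects during thread setup; per the container_of() matches at 4709 and 5061 these are struct binder_error wrappers around an embedded work. A sketch of that wrapper; the cmd field holding the code to deliver is an assumption beyond this listing:

    struct binder_error {
            struct binder_work work;  /* type == BINDER_WORK_RETURN_ERROR */
            uint32_t cmd;             /* assumed: BR_* code to report */
    };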
5888 binder_dequeue_work_ilocked(&node->work);
5926 BUG_ON(!list_empty(&ref->death->work.entry));
5927 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5928 binder_enqueue_work_ilocked(&ref->death->work,
6027 static void binder_deferred_func(struct work_struct *work)
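Line 6027 is the one match where "work" means the generic kernel workqueue rather than a binder_work: deferred per-proc maintenance, flagged in the @deferred_work bitmap at 454, is drained later by binder_deferred_func(). A sketch of the flagging side, assuming the names binder_deferred_lock, binder_deferred_list, deferred_work_node, and enum binder_deferred_state, none of which appear in this listing:

    static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

    static void
    binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
    {
            mutex_lock(&binder_deferred_lock);
            proc->deferred_work |= defer;          /* bitmap, see 454 */
            if (hlist_unhashed(&proc->deferred_work_node)) {
                    hlist_add_head(&proc->deferred_work_node,
                                   &binder_deferred_list);
                    schedule_work(&binder_deferred_work);
            }
            mutex_unlock(&binder_deferred_lock);
    }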
6120 t = container_of(w, struct binder_transaction, work);
6126 w, struct binder_error, work);
6135 node = container_of(w, struct binder_node, work);
6136 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6150 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6645 t = container_of(w, struct binder_transaction, work);