Lines Matching defs:work

436 * binder_worklist_empty() - Check if no items on the work list
454 * binder_enqueue_work_ilocked() - Add an item to the work list
455 * @work: struct binder_work to add to list
456 * @target_list: list to add work to
458 * Adds the work to the specified list. Asserts that work
464 binder_enqueue_work_ilocked(struct binder_work *work,
468 BUG_ON(work->entry.next && !list_empty(&work->entry));
469 list_add_tail(&work->entry, target_list);
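
The BUG_ON at 468 encodes the queueing invariant: an entry whose next pointer is still NULL comes from a zeroed allocation and has never been queued, which is acceptable, while an initialized entry must be empty (self-linked) before it may be added. A minimal userspace sketch of that check follows; the list helpers are hand-rolled stand-ins for the kernel's, and all *_sketch names are illustrative, not the driver's.

	#include <assert.h>

	struct list_head { struct list_head *next, *prev; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
	static int list_empty(const struct list_head *h) { return h->next == h; }

	static void list_add_tail(struct list_head *entry, struct list_head *head)
	{
		entry->prev = head->prev;
		entry->next = head;
		head->prev->next = entry;
		head->prev = entry;
	}

	struct binder_work_sketch { struct list_head entry; };

	static void enqueue_work(struct binder_work_sketch *work,
				 struct list_head *target_list)
	{
		/* Mirror of BUG_ON(work->entry.next && !list_empty(&work->entry)):
		 * a zeroed, never-initialized entry (next == NULL) may be queued,
		 * but an entry already linked into some list must not be. */
		assert(!(work->entry.next && !list_empty(&work->entry)));
		list_add_tail(&work->entry, target_list);
	}
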
473 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
474 * @thread: thread to queue work to
475 * @work: struct binder_work to add to list
477 * Adds the work to the todo list of the thread. Doesn't set the process_todo
479 * sleep without handling this work when it calls read.
485 struct binder_work *work)
488 binder_enqueue_work_ilocked(work, &thread->todo);
492 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
493 * @thread: thread to queue work to
494 * @work: struct binder_work to add to list
496 * Adds the work to the todo list of the thread, and enables processing
503 struct binder_work *work)
506 binder_enqueue_work_ilocked(work, &thread->todo);
509 * queuing their own work; they rely on these events to consume
511 * indefinitely without handling the work.
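
Lines 473-511 split thread-work queueing into a deferred variant and a normal one; the only difference is whether process_todo is set, which decides whether a thread calling read is allowed to go back to sleep. A sketch of that split, reusing the stand-ins above (the process_todo field here is a simplified model of the driver's flag):

	struct sketch_thread {
		struct list_head todo;
		int process_todo;	/* read loop consumes todo only when set */
	};

	static void enqueue_deferred_thread_work(struct sketch_thread *thread,
						 struct binder_work_sketch *work)
	{
		/* process_todo stays clear: a thread calling read may still
		 * go to sleep without handling this work (lines 477-479). */
		enqueue_work(work, &thread->todo);
	}

	static void enqueue_thread_work(struct sketch_thread *thread,
					struct binder_work_sketch *work)
	{
		enqueue_work(work, &thread->todo);
		thread->process_todo = 1;	/* next read must handle it */
	}
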
521 * binder_enqueue_thread_work() - Add an item to the thread work list
522 * @thread: thread to queue work to
523 * @work: struct binder_work to add to list
525 * Adds the work to the todo list of the thread, and enables processing
530 struct binder_work *work)
533 binder_enqueue_thread_work_ilocked(thread, work);
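
Line 530's binder_enqueue_thread_work() illustrates the driver-wide naming convention: a function with an _ilocked suffix assumes the caller already holds proc->inner_lock, and the unsuffixed version is the take-lock/call/release wrapper. Sketched with a pthread mutex standing in for the inner lock:

	#include <pthread.h>

	struct sketch_proc { pthread_mutex_t inner_lock; };

	static void enqueue_thread_work_locked(struct sketch_proc *proc,
					       struct sketch_thread *thread,
					       struct binder_work_sketch *work)
	{
		pthread_mutex_lock(&proc->inner_lock);
		enqueue_thread_work(thread, work);	/* the "_ilocked" helper */
		pthread_mutex_unlock(&proc->inner_lock);
	}
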
538 binder_dequeue_work_ilocked(struct binder_work *work)
540 list_del_init(&work->entry);
544 * binder_dequeue_work() - Removes an item from the work list
546 * @work: struct binder_work to remove from list
548 * Removes the specified work item from whatever list it is on.
549 * Can safely be called if work is not on any list.
552 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
555 binder_dequeue_work_ilocked(work);
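
The claim at 548-549 that dequeue "can safely be called if work is not on any list" rests on list_del_init(): after unlinking, the entry is re-pointed at itself, so a repeat call just rewires a self-linked node and changes nothing. A sketch, assuming the entry was initialized at least once (as line 817 does for nodes):

	static void list_del_init_sketch(struct list_head *entry)
	{
		entry->next->prev = entry->prev;	/* unlink from neighbours */
		entry->prev->next = entry->next;
		INIT_LIST_HEAD(entry);			/* self-link: "empty" again */
	}

On a self-linked entry, both unlink stores write values that are already there, so the second dequeue is a harmless no-op.
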
633 * binder_select_thread_ilocked() - selects a thread for doing proc work.
641 * Return: If there's a thread currently waiting for process work,
661 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
671 * Note that for this function to work as expected, callers
673 * to handle the work (if they don't have a thread already),
690 /* Didn't find a thread waiting for proc work; this can happen
694 * the kernel driver soon and pick up this work.
698 * over all threads not handling transaction work, and
701 * work currently.
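
Lines 633-701 describe the two-step wakeup policy: prefer a thread already parked on the proc's waiting_threads list; failing that, wake every thread not busy with a transaction, since (e)poll users can block without ever registering as waiting. A sketch of the selection half (field layout is an assumption; the broadcast fallback is only noted in the comment):

	struct waiting_thread {
		struct list_head waiting_thread_node;	/* first member, so the
							 * cast below is valid */
	};

	static struct waiting_thread *select_thread(struct list_head *waiting_threads)
	{
		if (list_empty(waiting_threads))
			return NULL;	/* caller falls back to waking all
					 * non-busy threads (lines 690-701) */
		struct waiting_thread *t =
			(struct waiting_thread *)waiting_threads->next;
		list_del_init_sketch(&t->waiting_thread_node);	/* unpark it */
		return t;
	}
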
812 node->work.type = BINDER_WORK_NODE;
817 INIT_LIST_HEAD(&node->work.entry);
879 binder_dequeue_work_ilocked(&node->work);
882 &node->work);
887 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
896 binder_enqueue_work_ilocked(&node->work, target_list);
938 if (list_empty(&node->work.entry)) {
939 binder_enqueue_work_ilocked(&node->work, &proc->todo);
946 binder_dequeue_work_ilocked(&node->work);
952 BUG_ON(!list_empty(&node->work.entry));
1001 * needed to serialize with the node work on the queue (which
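
Lines 812-952 show the pattern for node lifecycle events: each binder_node embeds exactly one binder_work, initialized at 817 and queued at most once, with list_empty() on its entry (line 938) serving as the "already pending?" test. A sketch of that at-most-once guard:

	struct sketch_node {
		struct binder_work_sketch work;	/* the node's single work item */
	};

	static void node_state_changed(struct sketch_node *node,
				       struct list_head *proc_todo)
	{
		/* Assumes INIT_LIST_HEAD(&node->work.entry) ran at creation,
		 * as line 817 does. Queue at most one pending item per node. */
		if (list_empty(&node->work.entry))
			enqueue_work(&node->work, proc_todo);
	}
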
1204 binder_dequeue_work(ref->proc, &ref->death->work);
1214 * @target_list: list to queue node work on
1663 &target_thread->reply_error.work);
1922 * @twork: callback_head for task work
1925 * Structure to pass task work to be handled after
1935 * @twork: callback head for task work
2769 t_queued = container_of(w, struct binder_transaction, work);
2784 * wake it up. If no thread is found, the work is queued to the proc
2835 binder_enqueue_thread_work_ilocked(thread, &t->work);
2837 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2846 list_del_init(&t_outdated->work.entry);
2850 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
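
Lines 2769-2851 are the queuing decision for an outgoing transaction: hand it directly to a waiting thread if one was found, otherwise put it on the proc-wide todo; a one-way transaction whose node already has an async transaction in flight is parked on node->async_todo instead (and line 2846 shows an outdated queued async transaction being displaced). A simplified sketch, with oneway and pending_async as assumed stand-in parameters:

	struct sketch_txn { struct binder_work_sketch work; };

	static void proc_transaction(struct sketch_txn *t,
				     struct sketch_thread *thread,	/* may be NULL */
				     struct list_head *proc_todo,
				     struct list_head *node_async_todo,
				     int oneway, int pending_async)
	{
		if (oneway && pending_async) {
			/* Another async txn for this node is in flight;
			 * park this one (cf. line 2850). */
			enqueue_work(&t->work, node_async_todo);
		} else if (thread) {
			enqueue_thread_work(thread, &t->work);	/* direct handoff */
		} else {
			enqueue_work(&t->work, proc_todo);	/* any thread may take it */
		}
	}
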
3200 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3652 t->work.type = BINDER_WORK_TRANSACTION;
3667 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3810 binder_enqueue_thread_work(thread, &thread->return_error.work);
3818 binder_enqueue_thread_work(thread, &thread->return_error.work);
4156 &thread->return_error.work);
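
Lines 3810-4156 and 4958-4960 suggest why line 3200's allocation failure can still be reported: each thread carries two preallocated binder_error objects (return_error, reply_error) whose embedded work items can always be queued, so the error path itself apparently needs no allocation. A sketch of that layout:

	struct sketch_error {
		struct binder_work_sketch work;
		int cmd;	/* e.g. an error command to return to userspace */
	};

	struct sketch_thread_full {
		struct list_head todo;
		struct sketch_error return_error;	/* preallocated at thread
							 * init, cf. 4958-4960 */
		struct sketch_error reply_error;
	};
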
4199 INIT_LIST_HEAD(&death->work.entry);
4203 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4207 &ref->death->work, &proc->todo);
4231 if (list_empty(&death->work.entry)) {
4232 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4238 &death->work);
4241 &death->work,
4247 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4248 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4270 work);
4287 binder_dequeue_work_ilocked(&death->work);
4288 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4289 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4294 thread, &death->work);
4297 &death->work,
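
Lines 4199-4297 implement a small state machine on a death notification's single work item: a clear request that finds nothing queued simply enqueues CLEAR_DEATH_NOTIFICATION, while one racing with a pending DEAD_BINDER retags it DEAD_BINDER_AND_CLEAR so the clear is delivered after the death (4288-4294). A sketch of the clear-side transition:

	enum death_work_type {
		WORK_DEAD_BINDER,		/* deliver "target died" */
		WORK_DEAD_BINDER_AND_CLEAR,	/* died, and user also cleared */
		WORK_CLEAR_DEATH_NOTIFICATION,	/* ack the clear request */
	};

	struct sketch_death {
		struct binder_work_sketch work;
		enum death_work_type type;
	};

	static void clear_death_notification(struct sketch_death *death,
					     struct list_head *todo)
	{
		if (list_empty(&death->work.entry)) {
			/* Nothing pending: just confirm the clear
			 * (lines 4231-4241). */
			death->type = WORK_CLEAR_DEATH_NOTIFICATION;
			enqueue_work(&death->work, todo);
		} else {
			/* A DEAD_BINDER is already queued; piggyback the
			 * clear on it (lines 4247-4248). */
			death->type = WORK_DEAD_BINDER_AND_CLEAR;
		}
	}
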
4472 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4528 t = container_of(w, struct binder_transaction, work);
4532 w, struct binder_error, work);
4567 struct binder_node *node = container_of(w, struct binder_node, work);
4660 death = container_of(w, struct binder_ref_death, work);
4696 pr_err("%d:%d: bad work type %d\n",
4885 t = container_of(w, struct binder_transaction, work);
4892 w, struct binder_error, work);
4910 death = container_of(w, struct binder_ref_death, work);
4920 pr_err("unexpected work type, %d, not freed\n",
4958 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4960 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
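
Lines 4528-4696 (and the release and debugfs paths at 4885-4920 and 6169-6199) all use the same dispatch idiom: every queueable object embeds a binder_work, the consumer pops the generic item, and w->type selects which container_of to apply. A self-contained sketch with illustrative types:

	#include <stdio.h>
	#include <stddef.h>

	#define container_of_sketch(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	enum work_type { WORK_TRANSACTION, WORK_NODE };

	struct tagged_work { struct list_head entry; enum work_type type; };
	struct txn_sketch  { int debug_id; struct tagged_work work; };
	struct node_sketch { int debug_id; struct tagged_work work; };

	static void handle_work(struct tagged_work *w)
	{
		switch (w->type) {
		case WORK_TRANSACTION: {
			struct txn_sketch *t =
				container_of_sketch(w, struct txn_sketch, work);
			printf("transaction %d\n", t->debug_id);
			break;
		}
		case WORK_NODE: {
			struct node_sketch *n =
				container_of_sketch(w, struct node_sketch, work);
			printf("node work %d\n", n->debug_id);
			break;
		}
		default:
			fprintf(stderr, "bad work type %d\n", w->type);	/* cf. 4696 */
		}
	}
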
5932 binder_dequeue_work_ilocked(&node->work);
5970 BUG_ON(!list_empty(&ref->death->work.entry));
5971 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5972 binder_enqueue_work_ilocked(&ref->death->work,
6074 static void binder_deferred_func(struct work_struct *work)
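
Note that line 6074's work_struct is not a binder_work: it is the generic kernel workqueue API, used to run binder_deferred_func() in process context. The usual pattern, as a hedged kernel-style fragment (the my_deferred_* names are illustrative, not the driver's):

	#include <linux/workqueue.h>

	static void my_deferred_func(struct work_struct *work)
	{
		/* runs later, in process context, on a kworker thread */
	}

	static DECLARE_WORK(my_deferred_work, my_deferred_func);

	static void kick_deferred(void)
	{
		schedule_work(&my_deferred_work);	/* queue onto system_wq */
	}
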
6169 t = container_of(w, struct binder_transaction, work);
6175 w, struct binder_error, work);
6184 node = container_of(w, struct binder_node, work);
6185 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6199 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6734 t = container_of(w, struct binder_transaction, work);