Lines matching refs:proc (cross-reference listing from the Android binder driver source; the number at the start of each entry is that line's number in the source file)

15  * 1) proc->outer_lock : protects binder_ref
21 * 3) proc->inner_lock : protects the thread and node lists
22 * (proc->threads, proc->waiting_threads, proc->nodes)
24 * (proc->todo, thread->todo, proc->delivered_death and
37 * foo_ilocked() : requires proc->inner_lock
38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
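
The suffix convention is mechanical: a *_ilocked() function asserts that the caller already holds the named lock, while the unsuffixed wrapper owns the acquire/release pair. A minimal sketch of the shape (foo is a hypothetical name, not a driver function):

    /* hypothetical illustration of the locking-suffix convention */
    static void foo_ilocked(struct binder_proc *proc)
    {
            assert_spin_locked(&proc->inner_lock);  /* caller holds inner_lock */
            /* ... operate on proc->todo, proc->threads, ... */
    }

    static void foo(struct binder_proc *proc)
    {
            binder_inner_proc_lock(proc);
            foo_ilocked(proc);
            binder_inner_proc_unlock(proc);
    }
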
98 DEFINE_SHOW_ATTRIBUTE(proc);
247 * There are separate work lists for proc, thread, and node (async).
274 * (protected by @proc->inner_lock)
275 * @rb_node: element for proc->nodes tree
276 * (protected by @proc->inner_lock)
279 * @proc: binder_proc that owns this node
285 * (protected by @proc->inner_lock if @proc
288 * (protected by @proc->inner_lock if @proc
291 * (protected by @proc->inner_lock if @proc
294 * (protected by @proc->inner_lock while @proc
296 * if @proc is NULL. During inc/dec and node release
298 * as the node dies and @proc becomes NULL)
304 * (protected by @proc->inner_lock if @proc
307 * (protected by @proc->inner_lock if @proc
310 * (protected by @proc->inner_lock if @proc
313 * (protected by @proc->inner_lock if @proc
324 * (protected by @proc->inner_lock)
336 struct binder_proc *proc;
347 * proc inner_lock
369 * (protected by inner_lock of the proc that
398 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
399 * @rb_node_node: node for lookup by @node in proc's rb_tree
402 * @proc: binder_proc containing ref
410 * structure is unsafe to access without holding @proc->outer_lock.
414 /* node + proc => ref (transaction) */
415 /* desc + proc => ref (transaction, inc/dec ref) */
416 /* node => refs + procs (proc exit) */
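
These three lookup comments map directly onto the data structures: each proc indexes its refs twice (once by userspace descriptor, once by target node), and each node keeps a list of every ref pointing at it so all owning procs can be found when the node's proc exits. A field sketch assembled from the kernel-doc fragments nearby (the node_entry name for the node->refs linkage is an assumption; most fields are elided):

    struct binder_ref {
            /* sketch only */
            struct rb_node rb_node_desc;   /* in proc->refs_by_desc, keyed by data.desc */
            struct rb_node rb_node_node;   /* in proc->refs_by_node, keyed by node */
            struct hlist_node node_entry;  /* assumed name: entry in node->refs list */
            struct binder_proc *proc;      /* proc that owns this ref */
            struct binder_node *node;      /* target node */
    };
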
421 struct binder_proc *proc;
434 * @threads: rbtree of binder_threads in this proc
437 * this proc ordered by node->ptr
443 * @waiting_threads: threads currently waiting for proc work
473 * @tmp_ref: temporary reference to indicate proc is in use
479 * @context: binder_context for this proc
529 * @proc: binder process for this thread
531 * @rb_node: element for proc->threads rbtree
532 * (protected by @proc->inner_lock)
533 * @waiting_thread_node: element for @proc->waiting_threads list
534 * (protected by @proc->inner_lock)
542 * (protected by @proc->inner_lock)
544 * (protected by @proc->inner_lock)
546 * (protected by @proc->inner_lock)
550 * (protected by @proc->inner_lock)
555 * (atomic since @proc->inner_lock cannot
559 * (protected by @proc->inner_lock)
564 struct binder_proc *proc;
663 * @proc: struct binder_proc to acquire
665 * Acquires proc->outer_lock. Used to protect binder_ref
666 * structures associated with the given proc.
668 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
670 _binder_proc_lock(struct binder_proc *proc, int line)
671 __acquires(&proc->outer_lock)
675 spin_lock(&proc->outer_lock);
680 * @proc: struct binder_proc to release
686 _binder_proc_unlock(struct binder_proc *proc, int line)
687 __releases(&proc->outer_lock)
691 spin_unlock(&proc->outer_lock);
696 * @proc: struct binder_proc to acquire
698 * Acquires proc->inner_lock. Used to protect todo lists
700 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
702 _binder_inner_proc_lock(struct binder_proc *proc, int line)
703 __acquires(&proc->inner_lock)
707 spin_lock(&proc->inner_lock);
712 * @proc: struct binder_proc to release
716 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
718 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
719 __releases(&proc->inner_lock)
723 spin_unlock(&proc->inner_lock);
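
Each lock wrapper is a macro that forwards __LINE__ to an inner function, so the driver's lock debugging can report the call site, and the __acquires()/__releases() markers keep sparse's lock-context checking balanced. Reassembled from the fragments at 668-675 (the debug trace that consumes 'line' is not among the matched lines and is assumed):

    #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
    static void
    _binder_proc_lock(struct binder_proc *proc, int line)
            __acquires(&proc->outer_lock)
    {
            /* a debug trace of 'line' goes here in the driver (assumed) */
            spin_lock(&proc->outer_lock);
    }
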
762 * Acquires node->lock. If node->proc also acquires
763 * proc->inner_lock. Used to protect binder_node fields
768 __acquires(&node->lock) __acquires(&node->proc->inner_lock)
773 if (node->proc)
774 binder_inner_proc_lock(node->proc);
777 __acquire(&node->proc->inner_lock);
789 __releases(&node->lock) __releases(&node->proc->inner_lock)
791 struct binder_proc *proc = node->proc;
795 if (proc)
796 binder_inner_proc_unlock(proc);
799 __release(&node->proc->inner_lock);
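
A binder_node can outlive its process, so node->proc may be NULL; the node lock helpers therefore take proc->inner_lock only when a proc is still attached, and on the other branch use the annotation-only __acquire()/__release() so sparse still sees balanced contexts. Reassembled from lines 762-799 (the initial spin_lock on node->lock is inferred; in mainline the inner function is _binder_node_inner_lock):

    static void
    binder_node_inner_lock(struct binder_node *node)
            __acquires(&node->lock) __acquires(&node->proc->inner_lock)
    {
            spin_lock(&node->lock);                    /* inferred */
            if (node->proc)
                    binder_inner_proc_lock(node->proc);
            else
                    /* annotation only: no proc, nothing to lock */
                    __acquire(&node->proc->inner_lock);
    }
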
810 * @proc: binder_proc associated with list
815 static bool binder_worklist_empty(struct binder_proc *proc,
820 binder_inner_proc_lock(proc);
822 binder_inner_proc_unlock(proc);
834 * Requires the proc->inner_lock to be held.
854 * Requires the proc->inner_lock to be held.
872 * Requires the proc->inner_lock to be held.
905 binder_inner_proc_lock(thread->proc);
907 binder_inner_proc_unlock(thread->proc);
918 * @proc: binder_proc associated with list
925 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
927 binder_inner_proc_lock(proc);
929 binder_inner_proc_unlock(proc);
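
binder_worklist_empty() and binder_dequeue_work() are the unsuffixed wrappers around their _ilocked cores; the body of the first can be reassembled almost entirely from the matched lines 815-822 (only the return plumbing is inferred):

    static bool binder_worklist_empty(struct binder_proc *proc,
                                      struct list_head *list)
    {
            bool ret;

            binder_inner_proc_lock(proc);
            ret = binder_worklist_empty_ilocked(list);
            binder_inner_proc_unlock(proc);
            return ret;
    }
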
944 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
946 static void binder_free_proc(struct binder_proc *proc);
965 !binder_worklist_empty_ilocked(&thread->proc->todo));
972 binder_inner_proc_lock(thread->proc);
974 binder_inner_proc_unlock(thread->proc);
987 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
993 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
1006 * binder_select_thread_ilocked() - selects a thread for doing proc work.
1007 * @proc: process to select a thread from
1018 binder_select_thread_ilocked(struct binder_proc *proc)
1022 assert_spin_locked(&proc->inner_lock);
1023 thread = list_first_entry_or_null(&proc->waiting_threads,
1034 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1035 * @proc: process to wake up a thread in
1039 * This function wakes up a thread in the @proc process.
1049 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1053 assert_spin_locked(&proc->inner_lock);
1063 /* Didn't find a thread waiting for proc work; this can happen
1076 binder_wakeup_poll_threads_ilocked(proc, sync);
1079 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1081 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1083 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
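
binder_wakeup_proc_ilocked() at 1079-1083 composes the two earlier helpers: pick a waiting thread if one exists, then wake it, falling back to the poll-thread scan at 987 when nobody is parked. Producers pair enqueue and wakeup inside one inner-lock section, as the fragments at 1312-1313 and 4371-4373 show:

    binder_inner_proc_lock(proc);
    binder_enqueue_work_ilocked(&node->work, &proc->todo);
    binder_wakeup_proc_ilocked(proc);
    binder_inner_proc_unlock(proc);
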
1104 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1107 struct rb_node *n = proc->nodes.rb_node;
1110 assert_spin_locked(&proc->inner_lock);
1132 static struct binder_node *binder_get_node(struct binder_proc *proc,
1137 binder_inner_proc_lock(proc);
1138 node = binder_get_node_ilocked(proc, ptr);
1139 binder_inner_proc_unlock(proc);
1144 struct binder_proc *proc,
1148 struct rb_node **p = &proc->nodes.rb_node;
1155 assert_spin_locked(&proc->inner_lock);
1180 rb_insert_color(&node->rb_node, &proc->nodes);
1182 node->proc = proc;
1194 proc->pid, current->pid, node->debug_id,
1200 static struct binder_node *binder_new_node(struct binder_proc *proc,
1208 binder_inner_proc_lock(proc);
1209 node = binder_init_node_ilocked(proc, new_node, fp);
1210 binder_inner_proc_unlock(proc);
1230 struct binder_proc *proc = node->proc;
1233 if (proc)
1234 assert_spin_locked(&proc->inner_lock);
1239 !(node->proc &&
1240 node == node->proc->context->binder_context_mgr_node &&
1290 struct binder_proc *proc = node->proc;
1293 if (proc)
1294 assert_spin_locked(&proc->inner_lock);
1310 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1312 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1313 binder_wakeup_proc_ilocked(proc);
1318 if (proc) {
1320 rb_erase(&node->rb_node, &proc->nodes);
1376 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1384 if (node->proc)
1385 binder_inner_proc_lock(node->proc);
1389 if (node->proc)
1390 binder_inner_proc_unlock(node->proc);
1407 if (!node->proc)
1413 if (!node->proc)
1434 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1437 struct rb_node *n = proc->refs_by_desc.rb_node;
1459 * @proc: binder_proc that owns the ref
1467 * into the given proc rb_trees and node refs list.
1476 struct binder_proc *proc,
1480 struct binder_context *context = proc->context;
1481 struct rb_node **p = &proc->refs_by_node.rb_node;
1502 new_ref->proc = proc;
1505 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1508 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1515 p = &proc->refs_by_desc.rb_node;
1528 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1535 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1547 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1550 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1551 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1575 ref->proc->pid, ref->data.debug_id,
1577 binder_dequeue_work(ref->proc, &ref->death->work);
1589 * Increment the ref. @ref->proc->outer_lock must be held on entry
1630 ref->proc->pid, ref->data.debug_id,
1641 ref->proc->pid, ref->data.debug_id,
1656 * binder_get_node_from_ref() - get the node from the given proc/desc
1657 * @proc: proc containing the ref
1662 * Given a proc and ref handle, return the associated binder_node
1667 struct binder_proc *proc,
1674 binder_proc_lock(proc);
1675 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1686 binder_proc_unlock(proc);
1691 binder_proc_unlock(proc);
1712 * @proc: proc containing the ref
1718 * Given a proc and ref handle, increment or decrement the ref
1723 static int binder_update_ref_for_handle(struct binder_proc *proc,
1731 binder_proc_lock(proc);
1732 ref = binder_get_ref_olocked(proc, desc, strong);
1744 binder_proc_unlock(proc);
1751 binder_proc_unlock(proc);
1757 * @proc: proc containing the ref
1766 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1769 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1774 * binder_inc_ref_for_node() - increment the ref for given proc/node
1775 * @proc: proc containing the ref
1781 * Given a proc and node, increment the ref. Create the ref if it
1786 static int binder_inc_ref_for_node(struct binder_proc *proc,
1796 binder_proc_lock(proc);
1797 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1799 binder_proc_unlock(proc);
1803 binder_proc_lock(proc);
1804 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1820 binder_proc_unlock(proc);
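
Lines 1796-1804 show the classic alloc-outside-spinlock retry: look up the ref under proc->outer_lock; if it is absent, drop the lock, allocate with a sleeping allocator, retake the lock, and repeat the lookup, passing in the new ref so the helper can insert it or discard it if another thread raced in the gap. A hedged sketch (the kzalloc call and error handling are assumed):

    struct binder_ref *ref, *new_ref = NULL;

    binder_proc_lock(proc);
    ref = binder_get_ref_for_node_olocked(proc, node, NULL);
    if (!ref) {
            binder_proc_unlock(proc);
            new_ref = kzalloc(sizeof(*new_ref), GFP_KERNEL);  /* may sleep */
            if (!new_ref)
                    return -ENOMEM;
            binder_proc_lock(proc);
            /* retry: inserts new_ref, or returns a ref raced in meanwhile */
            ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
    }
    /* ... adjust the ref count, then binder_proc_unlock(proc) ... */
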
1834 assert_spin_locked(&target_thread->proc->inner_lock);
1860 binder_inner_proc_lock(thread->proc);
1863 binder_inner_proc_unlock(thread->proc);
1867 binder_inner_proc_unlock(thread->proc);
1871 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1872 * @proc: proc to decrement
1875 * handle a transaction. proc->tmp_ref is incremented when
1879 * proc if appropriate (proc has been released, all threads have
1882 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1884 binder_inner_proc_lock(proc);
1885 proc->tmp_ref--;
1886 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1887 !proc->tmp_ref) {
1888 binder_inner_proc_unlock(proc);
1889 binder_free_proc(proc);
1892 binder_inner_proc_unlock(proc);
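
proc->tmp_ref is a plain counter protected by the inner lock, not a kref: it is incremented whenever a transaction needs the proc to stay addressable while no lock is held, as at 3169 and 5195, and binder_proc_dec_tmpref() is also the point where a dead proc with an empty thread tree is finally freed (1884-1889). The pairing, sketched:

    binder_inner_proc_lock(proc);
    proc->tmp_ref++;                /* keep proc alive across the unlocked region */
    binder_inner_proc_unlock(proc);

    /* ... dereference proc with no locks held ... */

    binder_proc_dec_tmpref(proc);   /* frees proc if dead, threadless, unreferenced */
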
1922 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1931 __acquires(&t->from->proc->inner_lock)
1937 __acquire(&from->proc->inner_lock);
1940 binder_inner_proc_lock(from->proc);
1945 binder_inner_proc_unlock(from->proc);
1946 __acquire(&from->proc->inner_lock);
2004 target_thread->proc->pid,
2024 binder_inner_proc_unlock(target_thread->proc);
2029 __release(&target_thread->proc->inner_lock);
2071 * @proc: binder_proc owning the buffer
2085 static size_t binder_get_object(struct binder_proc *proc,
2104 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2139 * @proc: binder_proc owning the buffer
2160 struct binder_proc *proc,
2176 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2180 object_size = binder_get_object(proc, NULL, b, object_offset, object);
2191 * @proc: binder_proc owning the buffer
2228 static bool binder_validate_fixup(struct binder_proc *proc,
2245 size_t object_size = binder_get_object(proc, NULL, b,
2261 if (binder_alloc_copy_from_buffer(&proc->alloc,
2330 static void binder_transaction_buffer_release(struct binder_proc *proc,
2341 proc->pid, buffer->debug_id,
2357 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2360 object_size = binder_get_object(proc, NULL, buffer,
2375 node = binder_get_node(proc, fp->binder);
2395 ret = binder_dec_ref_for_handle(proc, fp->handle,
2445 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2486 &proc->alloc, &fd, buffer,
2510 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2520 binder_transaction_buffer_release(proc, thread, buffer,
2529 struct binder_proc *proc = thread->proc;
2534 node = binder_get_node(proc, fp->binder);
2536 node = binder_new_node(proc, fp);
2542 proc->pid, thread->pid, (u64)fp->binder,
2548 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2581 struct binder_proc *proc = thread->proc;
2587 node = binder_get_node_from_ref(proc, fp->handle,
2591 proc->pid, thread->pid, fp->handle);
2594 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2600 if (node->proc == target_proc) {
2607 if (node->proc)
2608 binder_inner_proc_lock(node->proc);
2610 __acquire(&node->proc->inner_lock);
2614 if (node->proc)
2615 binder_inner_proc_unlock(node->proc);
2617 __release(&node->proc->inner_lock);
2655 struct binder_proc *proc = thread->proc;
2668 proc->pid, thread->pid,
2678 proc->pid, thread->pid, fd);
2682 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2968 struct binder_proc *proc = thread->proc;
2977 proc->pid, thread->pid, (u64)fda->num_fds);
2984 proc->pid, thread->pid, (u64)fda->num_fds);
3002 proc->pid, thread->pid);
3035 struct binder_proc *proc = thread->proc;
3049 proc->pid, thread->pid);
3058 proc->pid, thread->pid);
3066 proc->pid, thread->pid);
3077 * @proc: process to send the transaction to
3078 * @thread: thread in @proc to send the transaction to (may be NULL)
3082 * wake it up. If no thread is found, the work is queued to the proc
3092 struct binder_proc *proc,
3109 binder_inner_proc_lock(proc);
3111 if (proc->is_dead || (thread && thread->is_dead)) {
3112 binder_inner_proc_unlock(proc);
3118 thread = binder_select_thread_ilocked(proc);
3123 binder_enqueue_work_ilocked(&t->work, &proc->todo);
3128 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
3130 binder_inner_proc_unlock(proc);
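
Lines 3092-3130 give the dispatch order of binder_proc_transaction(): fail if the target proc (or the requested thread) is dead; otherwise, if no thread was named, try to pick one from proc->waiting_threads; queue to the thread if there is one, else to proc->todo; finally wake the target, synchronously for non-oneway work. A simplified sketch (the thread-queue helper name and the return convention are assumptions; oneway/async bookkeeping is elided):

    binder_inner_proc_lock(proc);
    if (proc->is_dead || (thread && thread->is_dead)) {
            binder_inner_proc_unlock(proc);
            return BR_DEAD_REPLY;
    }
    if (!thread && !oneway)
            thread = binder_select_thread_ilocked(proc);
    if (thread)
            binder_enqueue_thread_work_ilocked(thread, &t->work);  /* assumed */
    else
            binder_enqueue_work_ilocked(&t->work, &proc->todo);
    binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
    binder_inner_proc_unlock(proc);
    return 0;
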
3139 * @procp: returns @node->proc if valid
3140 * @error: if no @procp then returns BR_DEAD_REPLY
3150 * the transaction. We also need a tmpref on the proc while we are
3153 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
3154 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
3155 * target proc has died, @error is set to BR_DEAD_REPLY.
3165 if (node->proc) {
3169 node->proc->tmp_ref++;
3170 *procp = node->proc;
3178 static void binder_transaction(struct binder_proc *proc,
3202 struct binder_context *context = proc->context;
3216 e->from_proc = proc->pid;
3221 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3224 binder_inner_proc_lock(proc);
3227 binder_inner_proc_unlock(proc);
3229 proc->pid, thread->pid);
3238 proc->pid, thread->pid, in_reply_to->debug_id,
3244 binder_inner_proc_unlock(proc);
3252 binder_inner_proc_unlock(proc);
3257 __release(&target_thread->proc->inner_lock);
3264 proc->pid, thread->pid,
3268 binder_inner_proc_unlock(target_thread->proc);
3276 target_proc = target_thread->proc;
3278 binder_inner_proc_unlock(target_thread->proc);
3290 binder_proc_lock(proc);
3291 ref = binder_get_ref_olocked(proc, tr->target.handle,
3299 proc->pid, thread->pid);
3302 binder_proc_unlock(proc);
3313 if (target_node && target_proc->pid == proc->pid) {
3315 proc->pid, thread->pid);
3331 if (WARN_ON(proc == target_proc)) {
3337 if (security_binder_transaction(proc->cred,
3344 binder_inner_proc_lock(proc);
3355 * thread from proc->waiting_threads to enqueue
3360 proc->pid, thread->pid);
3361 binder_inner_proc_unlock(proc);
3375 proc->pid, thread->pid, tmp->debug_id,
3380 binder_inner_proc_unlock(proc);
3391 if (from && from->proc == target_proc) {
3401 binder_inner_proc_unlock(proc);
3433 proc->pid, thread->pid, t->debug_id,
3442 proc->pid, thread->pid, t->debug_id,
3458 t->async_from_pid = thread->proc->pid;
3462 t->sender_euid = task_euid(proc->tsk);
3477 security_cred_getsecid(proc->cred, &secid);
3544 proc->pid, thread->pid);
3552 proc->pid, thread->pid, (u64)tr->offsets_size);
3560 proc->pid, thread->pid,
3606 proc->pid, thread->pid);
3616 proc->pid, thread->pid,
3706 proc->pid, thread->pid);
3719 proc->pid, thread->pid);
3730 binder_get_object(proc, user_buffer, t->buffer,
3734 proc->pid, thread->pid,
3769 proc->pid, thread->pid);
3784 /* Fixup buffer pointer to target proc address space */
3812 proc->pid, thread->pid, hdr->type);
3826 proc->pid, thread->pid);
3837 proc->pid, thread->pid);
3864 binder_inner_proc_lock(proc);
3879 binder_inner_proc_unlock(proc);
3881 binder_inner_proc_lock(proc);
3883 binder_inner_proc_unlock(proc);
3912 binder_dequeue_work(proc, tcomplete);
3955 proc->pid, thread->pid, return_error, return_error_param,
3989 * @proc: binder proc that owns buffer
3999 binder_free_buf(struct binder_proc *proc,
4003 binder_inner_proc_lock(proc);
4008 binder_inner_proc_unlock(proc);
4016 BUG_ON(buf_node->proc != proc);
4023 w, &proc->todo);
4024 binder_wakeup_proc_ilocked(proc);
4029 binder_release_entire_buffer(proc, thread, buffer, is_failure);
4030 binder_alloc_free_buf(&proc->alloc, buffer);
4033 static int binder_thread_write(struct binder_proc *proc,
4039 struct binder_context *context = proc->context;
4053 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4077 if (ctx_mgr_node->proc == proc) {
4079 proc->pid, thread->pid);
4084 proc, ctx_mgr_node,
4091 proc, target, increment, strong,
4095 proc->pid, thread->pid,
4115 proc->pid, thread->pid, debug_string,
4121 proc->pid, thread->pid, debug_string,
4139 node = binder_get_node(proc, node_ptr);
4142 proc->pid, thread->pid,
4151 proc->pid, thread->pid,
4163 proc->pid, thread->pid,
4173 proc->pid, thread->pid,
4186 proc->pid, thread->pid,
4209 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4215 proc->pid, thread->pid,
4220 proc->pid, thread->pid,
4227 proc->pid, thread->pid, (u64)data_ptr,
4230 binder_free_buf(proc, thread, buffer, false);
4241 binder_transaction(proc, thread, &tr.transaction_data,
4252 binder_transaction(proc, thread, &tr,
4260 proc->pid, thread->pid);
4261 binder_inner_proc_lock(proc);
4265 proc->pid, thread->pid);
4266 } else if (proc->requested_threads == 0) {
4269 proc->pid, thread->pid);
4271 proc->requested_threads--;
4272 proc->requested_threads_started++;
4275 binder_inner_proc_unlock(proc);
4280 proc->pid, thread->pid);
4284 proc->pid, thread->pid);
4291 proc->pid, thread->pid);
4324 proc->pid, thread->pid);
4328 binder_proc_lock(proc);
4329 ref = binder_get_ref_olocked(proc, target, false);
4332 proc->pid, thread->pid,
4337 binder_proc_unlock(proc);
4344 proc->pid, thread->pid,
4356 proc->pid, thread->pid);
4358 binder_proc_unlock(proc);
4366 if (ref->node->proc == NULL) {
4369 binder_inner_proc_lock(proc);
4371 &ref->death->work, &proc->todo);
4372 binder_wakeup_proc_ilocked(proc);
4373 binder_inner_proc_unlock(proc);
4378 proc->pid, thread->pid);
4380 binder_proc_unlock(proc);
4386 proc->pid, thread->pid,
4390 binder_proc_unlock(proc);
4394 binder_inner_proc_lock(proc);
4406 &proc->todo);
4408 proc);
4414 binder_inner_proc_unlock(proc);
4417 binder_proc_unlock(proc);
4428 binder_inner_proc_lock(proc);
4429 list_for_each_entry(w, &proc->delivered_death,
4443 proc->pid, thread->pid, (u64)cookie,
4447 proc->pid, thread->pid, (u64)cookie);
4448 binder_inner_proc_unlock(proc);
4462 &proc->todo);
4463 binder_wakeup_proc_ilocked(proc);
4466 binder_inner_proc_unlock(proc);
4471 proc->pid, thread->pid, cmd);
4479 static void binder_stat_br(struct binder_proc *proc,
4485 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4490 static int binder_put_node_cmd(struct binder_proc *proc,
4512 binder_stat_br(proc, thread, cmd);
4514 proc->pid, thread->pid, cmd_name, node_debug_id,
4525 struct binder_proc *proc = thread->proc;
4529 binder_inner_proc_lock(proc);
4536 &proc->waiting_threads);
4537 binder_inner_proc_unlock(proc);
4539 binder_inner_proc_lock(proc);
4547 binder_inner_proc_unlock(proc);
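
Lines 4525-4547 outline the looper's wait: under the inner lock the thread registers itself on proc->waiting_threads (when it may handle proc work), drops the lock to sleep, and retakes it on wakeup to delist itself. Reassembled with the standard wait-queue idiom filled in (prepare_to_wait/schedule and the has-work predicate are assumed from context, not from the matched lines):

    DEFINE_WAIT(wait);
    int ret = 0;

    binder_inner_proc_lock(proc);
    for (;;) {
            prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
            if (binder_has_work_ilocked(thread, do_proc_work))   /* assumed */
                    break;
            if (do_proc_work)
                    list_add(&thread->waiting_thread_node,
                             &proc->waiting_threads);
            binder_inner_proc_unlock(proc);
            schedule();                       /* sleep with no locks held */
            binder_inner_proc_lock(proc);
            list_del_init(&thread->waiting_thread_node);
            if (signal_pending(current)) {
                    ret = -EINTR;
                    break;
            }
    }
    finish_wait(&thread->wait, &wait);
    binder_inner_proc_unlock(proc);
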
4555 * @proc: binder_proc associated with @t->buffer
4567 static int binder_apply_fd_fixups(struct binder_proc *proc,
4589 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4603 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4618 static int binder_thread_read(struct binder_proc *proc,
4637 binder_inner_proc_lock(proc);
4639 binder_inner_proc_unlock(proc);
4645 !binder_worklist_empty(proc, &thread->todo));
4650 proc->pid, thread->pid, thread->looper);
4654 binder_set_nice(proc->default_priority);
4679 binder_inner_proc_lock(proc);
4682 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4684 list = &proc->todo;
4686 binder_inner_proc_unlock(proc);
4695 binder_inner_proc_unlock(proc);
4704 binder_inner_proc_unlock(proc);
4712 binder_inner_proc_unlock(proc);
4719 binder_stat_br(proc, thread, cmd);
4722 binder_inner_proc_unlock(proc);
4730 binder_stat_br(proc, thread, cmd);
4733 proc->pid, thread->pid);
4745 BUG_ON(proc != node->proc);
4771 proc->pid, thread->pid,
4775 rb_erase(&node->rb_node, &proc->nodes);
4776 binder_inner_proc_unlock(proc);
4790 binder_inner_proc_unlock(proc);
4794 proc, thread, &ptr, node_ptr,
4799 proc, thread, &ptr, node_ptr,
4804 proc, thread, &ptr, node_ptr,
4809 proc, thread, &ptr, node_ptr,
4815 proc->pid, thread->pid,
4838 proc->pid, thread->pid,
4844 binder_inner_proc_unlock(proc);
4849 w, &proc->delivered_death);
4850 binder_inner_proc_unlock(proc);
4859 binder_stat_br(proc, thread, cmd);
4864 binder_inner_proc_unlock(proc);
4866 proc->pid, thread->pid, w->type);
4898 struct task_struct *sender = t_from->proc->tsk;
4904 binder_inner_proc_lock(thread->proc);
4906 binder_inner_proc_unlock(thread->proc);
4911 binder_inner_proc_lock(thread->proc);
4913 binder_inner_proc_unlock(thread->proc);
4917 ret = binder_apply_fd_fixups(proc, t);
4928 binder_free_buf(proc, thread, buffer, true);
4931 proc->pid, thread->pid,
4940 binder_stat_br(proc, thread, cmd);
4979 binder_stat_br(proc, thread, cmd);
4982 proc->pid, thread->pid,
4986 t->debug_id, t_from ? t_from->proc->pid : 0,
4996 binder_inner_proc_lock(thread->proc);
4999 binder_inner_proc_unlock(thread->proc);
5002 binder_inner_proc_lock(thread->proc);
5006 binder_inner_proc_unlock(thread->proc);
5016 binder_inner_proc_lock(proc);
5017 if (proc->requested_threads == 0 &&
5018 list_empty(&thread->proc->waiting_threads) &&
5019 proc->requested_threads_started < proc->max_threads &&
5023 proc->requested_threads++;
5024 binder_inner_proc_unlock(proc);
5027 proc->pid, thread->pid);
5030 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5032 binder_inner_proc_unlock(proc);
5036 static void binder_release_work(struct binder_proc *proc,
5043 binder_inner_proc_lock(proc);
5046 binder_inner_proc_unlock(proc);
5096 struct binder_proc *proc, struct binder_thread *new_thread)
5100 struct rb_node **p = &proc->threads.rb_node;
5117 thread->proc = proc;
5123 rb_insert_color(&thread->rb_node, &proc->threads);
5133 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5138 binder_inner_proc_lock(proc);
5139 thread = binder_get_thread_ilocked(proc, NULL);
5140 binder_inner_proc_unlock(proc);
5145 binder_inner_proc_lock(proc);
5146 thread = binder_get_thread_ilocked(proc, new_thread);
5147 binder_inner_proc_unlock(proc);
5154 static void binder_free_proc(struct binder_proc *proc)
5158 BUG_ON(!list_empty(&proc->todo));
5159 BUG_ON(!list_empty(&proc->delivered_death));
5160 device = container_of(proc->context, struct binder_device, context);
5162 kfree(proc->context->name);
5165 binder_alloc_deferred_release(&proc->alloc);
5166 put_task_struct(proc->tsk);
5167 put_cred(proc->cred);
5169 kfree(proc);
5176 binder_proc_dec_tmpref(thread->proc);
5180 static int binder_thread_release(struct binder_proc *proc,
5188 binder_inner_proc_lock(thread->proc);
5190 * take a ref on the proc so it survives
5191 * after we remove this thread from proc->threads.
5195 proc->tmp_ref++;
5201 rb_erase(&thread->rb_node, &proc->threads);
5217 proc->pid, thread->pid,
5254 binder_inner_proc_unlock(thread->proc);
5268 binder_release_work(proc, &thread->todo);
5276 struct binder_proc *proc = filp->private_data;
5280 thread = binder_get_thread(proc);
5284 binder_inner_proc_lock(thread->proc);
5288 binder_inner_proc_unlock(thread->proc);
5303 struct binder_proc *proc = filp->private_data;
5318 proc->pid, thread->pid,
5323 ret = binder_thread_write(proc, thread,
5336 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5341 binder_inner_proc_lock(proc);
5342 if (!binder_worklist_empty_ilocked(&proc->todo))
5343 binder_wakeup_proc_ilocked(proc);
5344 binder_inner_proc_unlock(proc);
5353 proc->pid, thread->pid,
5368 struct binder_proc *proc = filp->private_data;
5369 struct binder_context *context = proc->context;
5379 ret = security_binder_set_context_mgr(proc->cred);
5394 new_node = binder_new_node(proc, fbo);
5412 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5416 struct binder_context *context = proc->context;
5422 proc->pid);
5429 context->binder_context_mgr_node->proc != proc) {
5435 node = binder_get_node_from_ref(proc, handle, true, NULL);
5448 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5456 binder_inner_proc_lock(proc);
5457 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5468 binder_inner_proc_unlock(proc);
5476 struct binder_proc *proc = filp->private_data;
5482 proc->pid, current->pid, cmd, arg);*/
5484 binder_selftest_alloc(&proc->alloc);
5492 thread = binder_get_thread(proc);
5512 binder_inner_proc_lock(proc);
5513 proc->max_threads = max_threads;
5514 binder_inner_proc_unlock(proc);
5536 proc->pid, thread->pid);
5537 binder_thread_release(proc, thread);
5562 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5581 ret = binder_ioctl_get_node_debug_info(proc, &info);
5613 binder_inner_proc_lock(proc);
5616 binder_inner_proc_unlock(proc);
5637 binder_inner_proc_lock(proc);
5643 binder_inner_proc_unlock(proc);
5671 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5679 struct binder_proc *proc = vma->vm_private_data;
5683 proc->pid, vma->vm_start, vma->vm_end,
5690 struct binder_proc *proc = vma->vm_private_data;
5694 proc->pid, vma->vm_start, vma->vm_end,
5697 binder_alloc_vma_close(&proc->alloc);
5713 struct binder_proc *proc = filp->private_data;
5715 if (proc->tsk != current->group_leader)
5720 __func__, proc->pid, vma->vm_start, vma->vm_end,
5726 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5733 vma->vm_private_data = proc;
5735 return binder_alloc_mmap_handler(&proc->alloc, vma);
5740 struct binder_proc *proc, *itr;
5749 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5750 if (proc == NULL)
5752 spin_lock_init(&proc->inner_lock);
5753 spin_lock_init(&proc->outer_lock);
5755 proc->tsk = current->group_leader;
5756 proc->cred = get_cred(filp->f_cred);
5757 INIT_LIST_HEAD(&proc->todo);
5758 proc->default_priority = task_nice(current);
5769 proc->context = &binder_dev->context;
5770 binder_alloc_init(&proc->alloc);
5773 proc->pid = current->group_leader->pid;
5774 INIT_LIST_HEAD(&proc->delivered_death);
5775 INIT_LIST_HEAD(&proc->waiting_threads);
5776 filp->private_data = proc;
5780 if (itr->pid == proc->pid) {
5785 hlist_add_head(&proc->proc_node, &binder_procs);
5791 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5793 * proc debug entries are shared between contexts.
5798 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5800 (void *)(unsigned long)proc->pid,
5808 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5816 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5818 proc->binderfs_entry = binderfs_entry;
5833 struct binder_proc *proc = filp->private_data;
5835 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5840 static void binder_deferred_flush(struct binder_proc *proc)
5845 binder_inner_proc_lock(proc);
5846 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5855 binder_inner_proc_unlock(proc);
5858 "binder_flush: %d woke %d threads\n", proc->pid,
5864 struct binder_proc *proc = filp->private_data;
5866 debugfs_remove(proc->debugfs_entry);
5868 if (proc->binderfs_entry) {
5869 binderfs_remove_file(proc->binderfs_entry);
5870 proc->binderfs_entry = NULL;
5873 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5882 struct binder_proc *proc = node->proc;
5884 binder_release_work(proc, &node->async_todo);
5887 binder_inner_proc_lock(proc);
5894 binder_inner_proc_unlock(proc);
5901 node->proc = NULL;
5904 binder_inner_proc_unlock(proc);
5918 binder_inner_proc_lock(ref->proc);
5920 binder_inner_proc_unlock(ref->proc);
5929 &ref->proc->todo);
5930 binder_wakeup_proc_ilocked(ref->proc);
5931 binder_inner_proc_unlock(ref->proc);
5943 static void binder_deferred_release(struct binder_proc *proc)
5945 struct binder_context *context = proc->context;
5950 hlist_del(&proc->proc_node);
5955 context->binder_context_mgr_node->proc == proc) {
5958 __func__, proc->pid);
5962 binder_inner_proc_lock(proc);
5964 * Make sure proc stays alive after we
5967 proc->tmp_ref++;
5969 proc->is_dead = true;
5972 while ((n = rb_first(&proc->threads))) {
5976 binder_inner_proc_unlock(proc);
5978 active_transactions += binder_thread_release(proc, thread);
5979 binder_inner_proc_lock(proc);
5984 while ((n = rb_first(&proc->nodes))) {
5995 rb_erase(&node->rb_node, &proc->nodes);
5996 binder_inner_proc_unlock(proc);
5998 binder_inner_proc_lock(proc);
6000 binder_inner_proc_unlock(proc);
6003 binder_proc_lock(proc);
6004 while ((n = rb_first(&proc->refs_by_desc))) {
6010 binder_proc_unlock(proc);
6012 binder_proc_lock(proc);
6014 binder_proc_unlock(proc);
6016 binder_release_work(proc, &proc->todo);
6017 binder_release_work(proc, &proc->delivered_death);
6021 __func__, proc->pid, threads, nodes, incoming_refs,
6024 binder_proc_dec_tmpref(proc);
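
The release path at 5962-6014 drains each per-proc tree with an unlock-per-element loop: rb_first() is re-read on every pass because releasing an element drops the lock (the release work may sleep or take other locks), so the tree can change in the gap. The thread loop, reassembled from 5972-5979:

    struct rb_node *n;

    binder_inner_proc_lock(proc);
    while ((n = rb_first(&proc->threads))) {
            struct binder_thread *thread =
                    rb_entry(n, struct binder_thread, rb_node);

            binder_inner_proc_unlock(proc);
            active_transactions += binder_thread_release(proc, thread);
            binder_inner_proc_lock(proc);
    }
    /* nodes (5984-) and refs (6003-) are drained the same way */
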
6029 struct binder_proc *proc;
6036 proc = hlist_entry(binder_deferred_list.first,
6038 hlist_del_init(&proc->deferred_work_node);
6039 defer = proc->deferred_work;
6040 proc->deferred_work = 0;
6042 proc = NULL;
6048 binder_deferred_flush(proc);
6051 binder_deferred_release(proc); /* frees proc */
6052 } while (proc);
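
The deferred-work function at 6029-6052 pops one proc at a time off a global list, snapshots and clears its pending flag word, and only then acts on the flags, so that BINDER_DEFERRED_RELEASE (which frees the proc) runs with no list linkage left behind. Reassembled (the guarding mutex and its name, binder_deferred_lock, are assumed; only the list operations appear in the matches):

    do {
            mutex_lock(&binder_deferred_lock);              /* assumed */
            if (!hlist_empty(&binder_deferred_list)) {
                    proc = hlist_entry(binder_deferred_list.first,
                                       struct binder_proc, deferred_work_node);
                    hlist_del_init(&proc->deferred_work_node);
                    defer = proc->deferred_work;
                    proc->deferred_work = 0;
            } else {
                    proc = NULL;
                    defer = 0;
            }
            mutex_unlock(&binder_deferred_lock);

            if (defer & BINDER_DEFERRED_FLUSH)
                    binder_deferred_flush(proc);
            if (defer & BINDER_DEFERRED_RELEASE)
                    binder_deferred_release(proc);          /* frees proc */
    } while (proc);
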
6057 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6060 proc->deferred_work |= defer;
6061 if (hlist_unhashed(&proc->deferred_work_node)) {
6062 hlist_add_head(&proc->deferred_work_node,
6070 struct binder_proc *proc,
6082 t->from ? t->from->proc->pid : 0,
6089 if (proc != to_proc) {
6092 * correct proc inner lock for this node
6110 struct binder_proc *proc,
6122 m, proc, transaction_prefix, t);
6172 print_binder_transaction_ilocked(m, thread->proc,
6176 print_binder_transaction_ilocked(m, thread->proc,
6180 print_binder_transaction_ilocked(m, thread->proc,
6186 print_binder_work_ilocked(m, thread->proc, " ",
6210 seq_puts(m, " proc");
6212 seq_printf(m, " %d", ref->proc->pid);
6215 if (node->proc) {
6217 print_binder_work_ilocked(m, node->proc, " ",
6228 ref->node->proc ? "" : "dead ",
6235 struct binder_proc *proc, int print_all)
6243 seq_printf(m, "proc %d\n", proc->pid);
6244 seq_printf(m, "context %s\n", proc->context->name);
6247 binder_inner_proc_lock(proc);
6248 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6252 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6265 binder_inner_proc_unlock(proc);
6272 binder_inner_proc_lock(proc);
6274 binder_inner_proc_unlock(proc);
6279 binder_proc_lock(proc);
6280 for (n = rb_first(&proc->refs_by_desc);
6286 binder_proc_unlock(proc);
6288 binder_alloc_print_allocated(m, &proc->alloc);
6289 binder_inner_proc_lock(proc);
6290 list_for_each_entry(w, &proc->todo, entry)
6291 print_binder_work_ilocked(m, proc, " ",
6293 list_for_each_entry(w, &proc->delivered_death, entry) {
6297 binder_inner_proc_unlock(proc);
6346 "proc",
6398 struct binder_proc *proc)
6405 binder_alloc_get_free_async_space(&proc->alloc);
6407 seq_printf(m, "proc %d\n", proc->pid);
6408 seq_printf(m, "context %s\n", proc->context->name);
6411 binder_inner_proc_lock(proc);
6412 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6415 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6421 " free async space %zd\n", proc->requested_threads,
6422 proc->requested_threads_started, proc->max_threads,
6426 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6428 binder_inner_proc_unlock(proc);
6433 binder_proc_lock(proc);
6434 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6441 binder_proc_unlock(proc);
6444 count = binder_alloc_get_allocated_count(&proc->alloc);
6447 binder_alloc_print_pages(m, &proc->alloc);
6450 binder_inner_proc_lock(proc);
6451 list_for_each_entry(w, &proc->todo, entry) {
6455 binder_inner_proc_unlock(proc);
6458 print_binder_stats(m, " ", &proc->stats);
6464 struct binder_proc *proc;
6494 hlist_for_each_entry(proc, &binder_procs, proc_node)
6495 print_binder_proc(m, proc, 1);
6503 struct binder_proc *proc;
6510 hlist_for_each_entry(proc, &binder_procs, proc_node)
6511 print_binder_proc_stats(m, proc);
6519 struct binder_proc *proc;
6523 hlist_for_each_entry(proc, &binder_procs, proc_node)
6524 print_binder_proc(m, proc, 0);
6538 seq_puts(m, "binder proc state:\n");
6621 from_pid = t->from ? (t->from->proc ? t->from->proc->pid : 0) : t->async_from_pid;
6655 struct binder_proc *proc,
6665 binder_inner_proc_lock(proc);
6666 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6682 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6691 binder_inner_proc_unlock(proc);
6699 binder_inner_proc_lock(proc);
6701 binder_inner_proc_unlock(proc);
6711 struct binder_proc *proc)
6715 size_t free_async_space = binder_alloc_get_free_async_space(&proc->alloc);
6717 seq_printf(m, "%d\t", proc->pid);
6718 seq_printf(m, "%s\t", proc->context->name);
6720 binder_inner_proc_lock(proc);
6721 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6725 "\t%zd\n", proc->requested_threads,
6726 proc->requested_threads_started, proc->max_threads,
6729 binder_inner_proc_unlock(proc);
6734 struct binder_proc *proc = NULL;
6739 hlist_for_each_entry(proc, &binder_procs, proc_node)
6740 print_binder_transaction_brief(m, proc, now);
6743 hlist_for_each_entry(proc, &binder_procs, proc_node)
6744 print_binder_proc_brief(m, proc);
6798 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",