Lines matching refs:proc (cross-reference hits from the Android binder driver, binder.c; each hit is prefixed with its line number in the source file)
15 * 1) proc->outer_lock : protects binder_ref
21 * 3) proc->inner_lock : protects the thread and node lists
22 * (proc->threads, proc->waiting_threads, proc->nodes)
24 * (proc->todo, thread->todo, proc->delivered_death and
37 * foo_ilocked() : requires proc->inner_lock
38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
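The fragments above (source lines 15-39) document binder's three-level spinlock hierarchy and the suffix convention that encodes which locks a helper expects to be held. A minimal user-space sketch of the ordering rule, with pthread mutexes standing in for kernel spinlocks; every name below is illustrative, not binder's own:

#include <pthread.h>

struct proc_ctx {
	pthread_mutex_t outer_lock;	/* 1) taken first: ref structures */
	pthread_mutex_t inner_lock;	/* 3) taken last: thread/todo lists */
};

/* Any path needing both locks takes them in hierarchy order and
 * releases them in reverse, so no two paths can deadlock. */
static void touch_refs_and_todo(struct proc_ctx *p)
{
	pthread_mutex_lock(&p->outer_lock);	/* level 1 first */
	pthread_mutex_lock(&p->inner_lock);	/* then level 3 */
	/* ... work on ref structures and todo lists ... */
	pthread_mutex_unlock(&p->inner_lock);	/* reverse order out */
	pthread_mutex_unlock(&p->outer_lock);
}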
97 DEFINE_SHOW_ATTRIBUTE(proc);
290 * @proc: struct binder_proc to acquire
292 * Acquires proc->outer_lock. Used to protect binder_ref
293 * structures associated with the given proc.
295 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
297 _binder_proc_lock(struct binder_proc *proc, int line)
298 __acquires(&proc->outer_lock)
302 spin_lock(&proc->outer_lock);
307 * @proc: struct binder_proc to release
311 #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
313 _binder_proc_unlock(struct binder_proc *proc, int line)
314 __releases(&proc->outer_lock)
318 spin_unlock(&proc->outer_lock);
323 * @proc: struct binder_proc to acquire
325 * Acquires proc->inner_lock. Used to protect todo lists
327 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
329 _binder_inner_proc_lock(struct binder_proc *proc, int line)
330 __acquires(&proc->inner_lock)
334 spin_lock(&proc->inner_lock);
339 * @proc: struct binder_proc to release
343 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
345 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
346 __releases(&proc->inner_lock)
350 spin_unlock(&proc->inner_lock);
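Hits 295-350 show that every lock entry point is a macro handing __LINE__ to an inner helper, so debug instrumentation can attribute each acquire/release to its call site, while the sparse __acquires()/__releases() annotations let static analysis check lock balance. A hedged sketch of the same macro shape (the fprintf stands in for binder's own debug hooks; names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct lproc {
	pthread_mutex_t inner_lock;
};

static void _inner_proc_lock(struct lproc *p, int line)
{
	fprintf(stderr, "inner_lock taken from line %d\n", line);
	pthread_mutex_lock(&p->inner_lock);
}

/* Callers just write inner_proc_lock(p); the expansion records the
 * call site, exactly as binder_inner_proc_lock(proc) expands to
 * _binder_inner_proc_lock(proc, __LINE__). */
#define inner_proc_lock(p) _inner_proc_lock(p, __LINE__)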
389 * Acquires node->lock. If node->proc is set, also acquires
390 * proc->inner_lock. Used to protect binder_node fields
395 __acquires(&node->lock) __acquires(&node->proc->inner_lock)
400 if (node->proc)
401 binder_inner_proc_lock(node->proc);
404 __acquire(&node->proc->inner_lock);
416 __releases(&node->lock) __releases(&node->proc->inner_lock)
418 struct binder_proc *proc = node->proc;
422 if (proc)
423 binder_inner_proc_unlock(proc);
426 __release(&node->proc->inner_lock);
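Hits 389-426 are the node-lock variants: they take proc->inner_lock only when node->proc is non-NULL (a dead node has no owning process), and call the sparse stubs __acquire()/__release() on the NULL branch so the checker still sees balanced locking. A simplified user-space sketch of the conditional pattern, under the assumption (true in binder) that node->proc is stable while node->lock is held:

#include <pthread.h>
#include <stddef.h>

struct owner_proc {
	pthread_mutex_t inner_lock;
};

struct bnode {
	pthread_mutex_t lock;
	struct owner_proc *proc;	/* NULL once the owner has died */
};

static void node_inner_lock(struct bnode *node)
{
	pthread_mutex_lock(&node->lock);
	if (node->proc)			/* only live nodes have an inner lock */
		pthread_mutex_lock(&node->proc->inner_lock);
}

static void node_inner_unlock(struct bnode *node)
{
	struct owner_proc *proc = node->proc;	/* read under node->lock */

	if (proc)
		pthread_mutex_unlock(&proc->inner_lock);
	pthread_mutex_unlock(&node->lock);
}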
437 * @proc: binder_proc associated with list
442 static bool binder_worklist_empty(struct binder_proc *proc,
447 binder_inner_proc_lock(proc);
449 binder_inner_proc_unlock(proc);
461 * Requires the proc->inner_lock to be held.
481 * Requires the proc->inner_lock to be held.
499 * Requires the proc->inner_lock to be held.
532 binder_inner_proc_lock(thread->proc);
534 binder_inner_proc_unlock(thread->proc);
545 * @proc: binder_proc associated with list
552 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
554 binder_inner_proc_lock(proc);
556 binder_inner_proc_unlock(proc);
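Hits 437-556 illustrate the wrapper idiom built on the suffix convention: an _ilocked helper assumes proc->inner_lock is held (the kernel asserts it with assert_spin_locked()), and the unsuffixed function is just a lock/delegate/unlock shell. Sketch, with illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct work_item {
	struct work_item *next;
};

struct worklist {
	pthread_mutex_t inner_lock;
	struct work_item *head;
};

/* "_ilocked": caller already holds inner_lock. */
static bool worklist_empty_ilocked(struct worklist *w)
{
	return w->head == NULL;
}

/* Unsuffixed wrapper, like binder_worklist_empty(). */
static bool worklist_empty(struct worklist *w)
{
	bool empty;

	pthread_mutex_lock(&w->inner_lock);
	empty = worklist_empty_ilocked(w);
	pthread_mutex_unlock(&w->inner_lock);
	return empty;
}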
571 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
573 static void binder_free_proc(struct binder_proc *proc);
592 !binder_worklist_empty_ilocked(&thread->proc->todo));
599 binder_inner_proc_lock(thread->proc);
601 binder_inner_proc_unlock(thread->proc);
614 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
620 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
633 * binder_select_thread_ilocked() - selects a thread for doing proc work.
634 * @proc: process to select a thread from
645 binder_select_thread_ilocked(struct binder_proc *proc)
649 assert_spin_locked(&proc->inner_lock);
650 thread = list_first_entry_or_null(&proc->waiting_threads,
661 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
662 * @proc: process to wake up a thread in
666 * This function wakes up a thread in the @proc process.
676 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
680 assert_spin_locked(&proc->inner_lock);
690 /* Didn't find a thread waiting for proc work; this can happen
703 binder_wakeup_poll_threads_ilocked(proc, sync);
706 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
708 struct binder_thread *thread = binder_select_thread_ilocked(proc);
710 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
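Hits 633-710 outline the wakeup strategy: binder_select_thread_ilocked() pops the first entry from proc->waiting_threads, and binder_wakeup_thread_ilocked() falls back to waking every poll()ing thread when no parked waiter exists. A condensed sketch with the kernel list_head replaced by a singly linked list (names illustrative):

#include <stddef.h>
#include <stdbool.h>

struct bthread {
	struct bthread *next_waiting;
	bool woken;
};

struct bproc {
	struct bthread *waiting_threads;
};

/* Pop the first waiting thread, if any; inner lock held by caller. */
static struct bthread *select_thread_ilocked(struct bproc *p)
{
	struct bthread *t = p->waiting_threads;

	if (t)
		p->waiting_threads = t->next_waiting;
	return t;
}

static void wakeup_thread_ilocked(struct bproc *p, struct bthread *t)
{
	if (t) {
		t->woken = true;	/* kernel: wake_up_interruptible() */
		return;
	}
	/* No dedicated waiter: binder instead walks proc->threads and
	 * wakes the poll waitqueue of each looper thread. */
}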
731 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
734 struct rb_node *n = proc->nodes.rb_node;
737 assert_spin_locked(&proc->inner_lock);
759 static struct binder_node *binder_get_node(struct binder_proc *proc,
764 binder_inner_proc_lock(proc);
765 node = binder_get_node_ilocked(proc, ptr);
766 binder_inner_proc_unlock(proc);
771 struct binder_proc *proc,
775 struct rb_node **p = &proc->nodes.rb_node;
782 assert_spin_locked(&proc->inner_lock);
807 rb_insert_color(&node->rb_node, &proc->nodes);
809 node->proc = proc;
821 proc->pid, current->pid, node->debug_id,
827 static struct binder_node *binder_new_node(struct binder_proc *proc,
835 binder_inner_proc_lock(proc);
836 node = binder_init_node_ilocked(proc, new_node, fp);
837 binder_inner_proc_unlock(proc);
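Hits 731-837 show that nodes live in an rb-tree keyed by the userspace pointer, looked up and inserted only while inner_lock is held; the allocation for a new node happens outside the lock in binder_new_node(). The descent itself is an ordinary ordered-tree walk; a sketch with a plain BST standing in for the kernel rb-tree:

#include <stdint.h>
#include <stddef.h>

struct node_ent {
	uint64_t ptr;			/* key: userspace cookie */
	struct node_ent *left, *right;
};

/* Mirrors binder_get_node_ilocked(): binary search by key while the
 * caller holds the inner lock. */
static struct node_ent *get_node_ilocked(struct node_ent *root, uint64_t ptr)
{
	struct node_ent *n = root;

	while (n) {
		if (ptr < n->ptr)
			n = n->left;
		else if (ptr > n->ptr)
			n = n->right;
		else
			return n;
	}
	return NULL;
}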
857 struct binder_proc *proc = node->proc;
860 if (proc)
861 assert_spin_locked(&proc->inner_lock);
866 !(node->proc &&
867 node == node->proc->context->binder_context_mgr_node &&
917 struct binder_proc *proc = node->proc;
920 if (proc)
921 assert_spin_locked(&proc->inner_lock);
937 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
939 binder_enqueue_work_ilocked(&node->work, &proc->todo);
940 binder_wakeup_proc_ilocked(proc);
945 if (proc) {
947 rb_erase(&node->rb_node, &proc->nodes);
1003 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1011 if (node->proc)
1012 binder_inner_proc_lock(node->proc);
1016 if (node->proc)
1017 binder_inner_proc_unlock(node->proc);
1034 if (!node->proc)
1040 if (!node->proc)
1061 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1064 struct rb_node *n = proc->refs_by_desc.rb_node;
1086 * @proc: binder_proc that owns the ref
1094 * into the given proc rb_trees and node refs list.
1103 struct binder_proc *proc,
1107 struct binder_context *context = proc->context;
1108 struct rb_node **p = &proc->refs_by_node.rb_node;
1129 new_ref->proc = proc;
1132 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1135 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1142 p = &proc->refs_by_desc.rb_node;
1155 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1162 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1174 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1177 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1178 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1202 ref->proc->pid, ref->data.debug_id,
1204 binder_dequeue_work(ref->proc, &ref->death->work);
1216 * Increment the ref. @ref->proc->outer_lock must be held on entry
1257 ref->proc->pid, ref->data.debug_id,
1268 ref->proc->pid, ref->data.debug_id,
1283 * binder_get_node_from_ref() - get the node from the given proc/desc
1284 * @proc: proc containing the ref
1289 * Given a proc and ref handle, return the associated binder_node
1294 struct binder_proc *proc,
1301 binder_proc_lock(proc);
1302 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1313 binder_proc_unlock(proc);
1318 binder_proc_unlock(proc);
1339 * @proc: proc containing the ref
1345 * Given a proc and ref handle, increment or decrement the ref
1350 static int binder_update_ref_for_handle(struct binder_proc *proc,
1358 binder_proc_lock(proc);
1359 ref = binder_get_ref_olocked(proc, desc, strong);
1371 binder_proc_unlock(proc);
1378 binder_proc_unlock(proc);
1384 * @proc: proc containing the ref
1393 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1396 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1401 * binder_inc_ref_for_node() - increment the ref for given proc/node
1402 * @proc: proc containing the ref
1408 * Given a proc and node, increment the ref. Create the ref if it
1413 static int binder_inc_ref_for_node(struct binder_proc *proc,
1423 binder_proc_lock(proc);
1424 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1426 binder_proc_unlock(proc);
1430 binder_proc_lock(proc);
1431 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1447 binder_proc_unlock(proc);
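Hits 1401-1447 capture a classic spinlock idiom: look up under the lock; on a miss, drop the lock to allocate (sleeping allocations are forbidden under a spinlock), retake it, and retry the lookup passing in the new object so a concurrent inserter wins cleanly. A user-space sketch with a linked list instead of binder's rb-trees:

#include <pthread.h>
#include <stdlib.h>

struct ref_ent {
	int key;
	struct ref_ent *next;
};

struct ref_table {
	pthread_mutex_t lock;
	struct ref_ent *head;
};

/* Find key; if absent and new_ent is given, insert it. Lock held. */
static struct ref_ent *lookup_locked(struct ref_table *t, int key,
				     struct ref_ent *new_ent)
{
	struct ref_ent *r;

	for (r = t->head; r; r = r->next)
		if (r->key == key)
			return r;
	if (new_ent) {
		new_ent->key = key;
		new_ent->next = t->head;
		t->head = new_ent;
		return new_ent;
	}
	return NULL;
}

/* Mirrors binder_inc_ref_for_node(): allocate outside the lock, retry
 * the lookup, and free our copy if another thread raced us in. */
static struct ref_ent *get_or_create(struct ref_table *t, int key)
{
	struct ref_ent *new_ent, *r;

	pthread_mutex_lock(&t->lock);
	r = lookup_locked(t, key, NULL);
	if (!r) {
		pthread_mutex_unlock(&t->lock);
		new_ent = calloc(1, sizeof(*new_ent));
		if (!new_ent)
			return NULL;
		pthread_mutex_lock(&t->lock);
		r = lookup_locked(t, key, new_ent);
		if (r != new_ent)
			free(new_ent);	/* lost the race */
	}
	pthread_mutex_unlock(&t->lock);
	return r;
}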
1461 assert_spin_locked(&target_thread->proc->inner_lock);
1487 binder_inner_proc_lock(thread->proc);
1490 binder_inner_proc_unlock(thread->proc);
1494 binder_inner_proc_unlock(thread->proc);
1498 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1499 * @proc: proc to decrement
1502 * handle a transaction. proc->tmp_ref is incremented when
1506 * proc if appropriate (proc has been released, all threads have
1509 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1511 binder_inner_proc_lock(proc);
1512 proc->tmp_ref--;
1513 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1514 !proc->tmp_ref) {
1515 binder_inner_proc_unlock(proc);
1516 binder_free_proc(proc);
1519 binder_inner_proc_unlock(proc);
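Hits 1498-1519 describe the temporary-reference scheme: tmp_ref pins a proc while it is used outside the locks, and the decrement that reaches zero on an already-dead proc with no remaining threads is the one that frees it. A sketch of the drop-lock-then-free shape (names illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <stdbool.h>

struct tproc {
	pthread_mutex_t inner_lock;
	int tmp_ref;		/* pins the struct across unlocked use */
	bool is_dead;		/* set at release time */
	int live_threads;
};

/* Mirrors binder_proc_dec_tmpref(): the last temporary reference to a
 * dead, threadless proc performs the free, after dropping the lock
 * that lives inside the object being freed. */
static void proc_dec_tmpref(struct tproc *p)
{
	pthread_mutex_lock(&p->inner_lock);
	p->tmp_ref--;
	if (p->is_dead && p->live_threads == 0 && p->tmp_ref == 0) {
		pthread_mutex_unlock(&p->inner_lock);
		free(p);	/* kernel: binder_free_proc(proc) */
		return;
	}
	pthread_mutex_unlock(&p->inner_lock);
}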
1549 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1558 __acquires(&t->from->proc->inner_lock)
1564 __acquire(&from->proc->inner_lock);
1567 binder_inner_proc_lock(from->proc);
1572 binder_inner_proc_unlock(from->proc);
1573 __acquire(&from->proc->inner_lock);
1606 from_proc = t->from ? t->from->proc->pid : 0;
1655 target_thread->proc->pid,
1675 binder_inner_proc_unlock(target_thread->proc);
1680 __release(&target_thread->proc->inner_lock);
1722 * @proc: binder_proc owning the buffer
1736 static size_t binder_get_object(struct binder_proc *proc,
1753 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1788 * @proc: binder_proc owning the buffer
1809 struct binder_proc *proc,
1825 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1829 object_size = binder_get_object(proc, NULL, b, object_offset, object);
1840 * @proc: binder_proc owning the buffer
1877 static bool binder_validate_fixup(struct binder_proc *proc,
1894 size_t object_size = binder_get_object(proc, NULL, b,
1910 if (binder_alloc_copy_from_buffer(&proc->alloc,
1981 static void binder_transaction_buffer_release(struct binder_proc *proc,
1992 proc->pid, buffer->debug_id,
2008 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2011 object_size = binder_get_object(proc, NULL, buffer,
2026 node = binder_get_node(proc, fp->binder);
2046 ret = binder_dec_ref_for_handle(proc, fp->handle,
2096 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2137 &proc->alloc, &fd, buffer,
2161 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2171 binder_transaction_buffer_release(proc, thread, buffer,
2180 struct binder_proc *proc = thread->proc;
2185 node = binder_get_node(proc, fp->binder);
2187 node = binder_new_node(proc, fp);
2193 proc->pid, thread->pid, (u64)fp->binder,
2199 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2232 struct binder_proc *proc = thread->proc;
2238 node = binder_get_node_from_ref(proc, fp->handle,
2242 proc->pid, thread->pid, fp->handle);
2245 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2251 if (node->proc == target_proc) {
2258 if (node->proc)
2259 binder_inner_proc_lock(node->proc);
2261 __acquire(&node->proc->inner_lock);
2265 if (node->proc)
2266 binder_inner_proc_unlock(node->proc);
2268 __release(&node->proc->inner_lock);
2306 struct binder_proc *proc = thread->proc;
2319 proc->pid, thread->pid,
2329 proc->pid, thread->pid, fd);
2333 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2620 struct binder_proc *proc = thread->proc;
2629 proc->pid, thread->pid, (u64)fda->num_fds);
2636 proc->pid, thread->pid, (u64)fda->num_fds);
2654 proc->pid, thread->pid);
2687 struct binder_proc *proc = thread->proc;
2701 proc->pid, thread->pid);
2710 proc->pid, thread->pid);
2718 proc->pid, thread->pid);
2756 * Requires the proc->inner_lock to be held.
2779 * @proc: process to send the transaction to
2780 * @thread: thread in @proc to send the transaction to (may be NULL)
2784 * wake it up. If no thread is found, the work is queued to the proc
2798 struct binder_proc *proc,
2817 binder_inner_proc_lock(proc);
2818 if (proc->is_frozen) {
2820 proc->sync_recv |= !oneway;
2821 proc->async_recv |= oneway;
2824 if ((frozen && !oneway) || proc->is_dead ||
2826 binder_inner_proc_unlock(proc);
2832 thread = binder_select_thread_ilocked(proc);
2837 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2847 proc->outstanding_txns--;
2854 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2856 proc->outstanding_txns++;
2857 binder_inner_proc_unlock(proc);
2870 binder_release_entire_buffer(proc, NULL, buffer, false);
2871 binder_alloc_free_buf(&proc->alloc, buffer);
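Hits 2779-2871 are binder_proc_transaction(), which decides whether a transaction may be queued at all: deliveries attempted against a frozen process are recorded (sync_recv/async_recv), synchronous sends to a frozen or dead target fail, and successful queuing bumps outstanding_txns. A boiled-down sketch of that gate, with plain error codes standing in for BR_DEAD_REPLY/BR_FROZEN_REPLY:

#include <stdbool.h>

struct txn_target {
	bool is_frozen, is_dead;
	bool sync_recv, async_recv;	/* what a frozen target missed */
	int outstanding_txns;
};

static int try_queue_txn(struct txn_target *t, bool oneway)
{
	if (t->is_frozen) {
		t->sync_recv |= !oneway;
		t->async_recv |= oneway;
	}
	if (t->is_dead)
		return -1;		/* BR_DEAD_REPLY */
	if (t->is_frozen && !oneway)
		return -2;		/* BR_FROZEN_REPLY */
	t->outstanding_txns++;		/* queued for delivery */
	return 0;
}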
2885 * @procp: returns @node->proc if valid
2896 * the transaction. We also need a tmpref on the proc while we are
2899 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2900 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2901 * target proc has died, @error is set to BR_DEAD_REPLY.
2911 if (node->proc) {
2915 node->proc->tmp_ref++;
2916 *procp = node->proc;
2931 __release(&from->proc->inner_lock);
2938 binder_inner_proc_unlock(from->proc);
2942 static void binder_transaction(struct binder_proc *proc,
2966 struct binder_context *context = proc->context;
2981 e->from_proc = proc->pid;
2986 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2988 binder_inner_proc_lock(proc);
2990 binder_inner_proc_unlock(proc);
2993 binder_inner_proc_lock(proc);
2996 binder_inner_proc_unlock(proc);
2998 proc->pid, thread->pid);
3007 proc->pid, thread->pid, in_reply_to->debug_id,
3013 binder_inner_proc_unlock(proc);
3021 binder_inner_proc_unlock(proc);
3026 __release(&target_thread->proc->inner_lock);
3028 thread->pid, proc->pid);
3035 proc->pid, thread->pid,
3039 binder_inner_proc_unlock(target_thread->proc);
3047 target_proc = target_thread->proc;
3049 binder_inner_proc_unlock(target_thread->proc);
3061 binder_proc_lock(proc);
3062 ref = binder_get_ref_olocked(proc, tr->target.handle,
3070 proc->pid, thread->pid, tr->target.handle);
3073 binder_proc_unlock(proc);
3084 if (target_node && target_proc->pid == proc->pid) {
3086 proc->pid, thread->pid);
3095 thread->pid, proc->pid);
3104 if (WARN_ON(proc == target_proc)) {
3106 thread->pid, proc->pid);
3112 if (security_binder_transaction(proc->cred,
3115 thread->pid, proc->pid);
3121 binder_inner_proc_lock(proc);
3132 * thread from proc->waiting_threads to enqueue
3137 proc->pid, thread->pid);
3138 binder_inner_proc_unlock(proc);
3152 proc->pid, thread->pid, tmp->debug_id,
3157 binder_inner_proc_unlock(proc);
3168 if (from && from->proc == target_proc) {
3178 binder_inner_proc_unlock(proc);
3188 thread->pid, proc->pid);
3201 thread->pid, proc->pid);
3215 proc->pid, thread->pid, t->debug_id,
3224 proc->pid, thread->pid, t->debug_id,
3240 t->from_pid = thread->proc->pid;
3245 t->sender_euid = task_euid(proc->tsk);
3260 security_cred_getsecid(proc->cred, &secid);
3264 thread->pid, proc->pid);
3274 thread->pid, proc->pid);
3336 proc->pid, thread->pid);
3344 proc->pid, thread->pid, (u64)tr->offsets_size);
3352 proc->pid, thread->pid,
3380 thread->pid, proc->pid);
3399 proc->pid, thread->pid);
3409 proc->pid, thread->pid,
3440 thread->pid, proc->pid);
3459 thread->pid, proc->pid);
3481 thread->pid, proc->pid);
3505 proc->pid, thread->pid);
3518 proc->pid, thread->pid);
3529 binder_get_object(proc, user_buffer, t->buffer,
3533 proc->pid, thread->pid,
3552 thread->pid, proc->pid);
3570 proc->pid, thread->pid);
3581 thread->pid, proc->pid);
3587 /* Fixup buffer pointer to target proc address space */
3606 thread->pid, proc->pid);
3617 proc->pid, thread->pid, hdr->type);
3631 proc->pid, thread->pid);
3642 proc->pid, thread->pid);
3674 binder_inner_proc_lock(proc);
3689 binder_inner_proc_unlock(proc);
3693 binder_inner_proc_lock(proc);
3695 binder_inner_proc_unlock(proc);
3732 thread->pid, proc->pid);
3734 binder_dequeue_work(proc, tcomplete);
3775 proc->pid, thread->pid, reply ? "reply" :
3813 binder_inner_proc_lock(proc);
3816 binder_inner_proc_unlock(proc);
3824 * @proc: binder proc that owns buffer
3834 binder_free_buf(struct binder_proc *proc,
3838 binder_inner_proc_lock(proc);
3843 binder_inner_proc_unlock(proc);
3851 BUG_ON(buf_node->proc != proc);
3858 w, &proc->todo);
3859 binder_wakeup_proc_ilocked(proc);
3864 binder_release_entire_buffer(proc, thread, buffer, is_failure);
3865 binder_alloc_free_buf(&proc->alloc, buffer);
3868 static int binder_thread_write(struct binder_proc *proc,
3874 struct binder_context *context = proc->context;
3888 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3913 if (ctx_mgr_node->proc == proc) {
3915 proc->pid, thread->pid);
3920 proc, ctx_mgr_node,
3927 proc, target, increment, strong,
3931 proc->pid, thread->pid,
3951 proc->pid, thread->pid, debug_string,
3957 proc->pid, thread->pid, debug_string,
3975 node = binder_get_node(proc, node_ptr);
3978 proc->pid, thread->pid,
3987 proc->pid, thread->pid,
3999 proc->pid, thread->pid,
4009 proc->pid, thread->pid,
4022 proc->pid, thread->pid,
4045 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4051 proc->pid, thread->pid,
4056 proc->pid, thread->pid,
4063 proc->pid, thread->pid, (u64)data_ptr,
4066 binder_free_buf(proc, thread, buffer, false);
4077 binder_transaction(proc, thread, &tr.transaction_data,
4088 binder_transaction(proc, thread, &tr,
4096 proc->pid, thread->pid);
4097 binder_inner_proc_lock(proc);
4101 proc->pid, thread->pid);
4102 } else if (proc->requested_threads == 0) {
4105 proc->pid, thread->pid);
4107 proc->requested_threads--;
4108 proc->requested_threads_started++;
4111 binder_inner_proc_unlock(proc);
4116 proc->pid, thread->pid);
4120 proc->pid, thread->pid);
4127 proc->pid, thread->pid);
4160 proc->pid, thread->pid);
4164 binder_proc_lock(proc);
4165 ref = binder_get_ref_olocked(proc, target, false);
4168 proc->pid, thread->pid,
4173 binder_proc_unlock(proc);
4180 proc->pid, thread->pid,
4192 proc->pid, thread->pid);
4194 binder_proc_unlock(proc);
4202 if (ref->node->proc == NULL) {
4205 binder_inner_proc_lock(proc);
4207 &ref->death->work, &proc->todo);
4208 binder_wakeup_proc_ilocked(proc);
4209 binder_inner_proc_unlock(proc);
4214 proc->pid, thread->pid);
4216 binder_proc_unlock(proc);
4222 proc->pid, thread->pid,
4226 binder_proc_unlock(proc);
4230 binder_inner_proc_lock(proc);
4242 &proc->todo);
4244 proc);
4250 binder_inner_proc_unlock(proc);
4253 binder_proc_unlock(proc);
4264 binder_inner_proc_lock(proc);
4265 list_for_each_entry(w, &proc->delivered_death,
4279 proc->pid, thread->pid, (u64)cookie,
4283 proc->pid, thread->pid, (u64)cookie);
4284 binder_inner_proc_unlock(proc);
4298 &proc->todo);
4299 binder_wakeup_proc_ilocked(proc);
4302 binder_inner_proc_unlock(proc);
4307 proc->pid, thread->pid, cmd);
4315 static void binder_stat_br(struct binder_proc *proc,
4321 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4326 static int binder_put_node_cmd(struct binder_proc *proc,
4348 binder_stat_br(proc, thread, cmd);
4350 proc->pid, thread->pid, cmd_name, node_debug_id,
4361 struct binder_proc *proc = thread->proc;
4364 binder_inner_proc_lock(proc);
4371 &proc->waiting_threads);
4372 binder_inner_proc_unlock(proc);
4374 binder_inner_proc_lock(proc);
4382 binder_inner_proc_unlock(proc);
4389 * @proc: binder_proc associated with @t->buffer
4400 static int binder_apply_fd_fixups(struct binder_proc *proc,
4421 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4441 static int binder_thread_read(struct binder_proc *proc,
4460 binder_inner_proc_lock(proc);
4462 binder_inner_proc_unlock(proc);
4468 !binder_worklist_empty(proc, &thread->todo));
4473 proc->pid, thread->pid, thread->looper);
4477 binder_set_nice(proc->default_priority);
4502 binder_inner_proc_lock(proc);
4505 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4507 list = &proc->todo;
4509 binder_inner_proc_unlock(proc);
4518 binder_inner_proc_unlock(proc);
4527 binder_inner_proc_unlock(proc);
4535 binder_inner_proc_unlock(proc);
4542 binder_stat_br(proc, thread, cmd);
4547 if (proc->oneway_spam_detection_enabled &&
4554 binder_inner_proc_unlock(proc);
4561 binder_stat_br(proc, thread, cmd);
4564 proc->pid, thread->pid);
4576 BUG_ON(proc != node->proc);
4602 proc->pid, thread->pid,
4606 rb_erase(&node->rb_node, &proc->nodes);
4607 binder_inner_proc_unlock(proc);
4621 binder_inner_proc_unlock(proc);
4625 proc, thread, &ptr, node_ptr,
4630 proc, thread, &ptr, node_ptr,
4635 proc, thread, &ptr, node_ptr,
4640 proc, thread, &ptr, node_ptr,
4646 proc->pid, thread->pid,
4669 proc->pid, thread->pid,
4675 binder_inner_proc_unlock(proc);
4680 w, &proc->delivered_death);
4681 binder_inner_proc_unlock(proc);
4690 binder_stat_br(proc, thread, cmd);
4695 binder_inner_proc_unlock(proc);
4697 proc->pid, thread->pid, w->type);
4729 struct task_struct *sender = t_from->proc->tsk;
4735 binder_inner_proc_lock(thread->proc);
4737 binder_inner_proc_unlock(thread->proc);
4742 binder_inner_proc_lock(thread->proc);
4744 binder_inner_proc_unlock(thread->proc);
4748 ret = binder_apply_fd_fixups(proc, t);
4759 binder_free_buf(proc, thread, buffer, true);
4762 proc->pid, thread->pid,
4771 binder_stat_br(proc, thread, cmd);
4810 binder_stat_br(proc, thread, cmd);
4813 proc->pid, thread->pid,
4817 t->debug_id, t_from ? t_from->proc->pid : 0,
4827 binder_inner_proc_lock(thread->proc);
4830 binder_inner_proc_unlock(thread->proc);
4833 binder_inner_proc_lock(thread->proc);
4837 binder_inner_proc_unlock(thread->proc);
4847 binder_inner_proc_lock(proc);
4848 if (proc->requested_threads == 0 &&
4849 list_empty(&thread->proc->waiting_threads) &&
4850 proc->requested_threads_started < proc->max_threads &&
4854 proc->requested_threads++;
4855 binder_inner_proc_unlock(proc);
4858 proc->pid, thread->pid);
4861 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4863 binder_inner_proc_unlock(proc);
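Hits 4847-4863 close out binder_thread_read(): still under the inner lock, the driver decides whether to append BR_SPAWN_LOOPER and ask userspace for another looper thread. The gate requires that no spawn request is already outstanding, no thread is idle, the started count is below max_threads, and the current thread is a registered or entered looper. Transcribed as a predicate (names simplified):

#include <stdbool.h>

struct looper_state {
	int requested_threads;		/* spawns requested, not yet started */
	int requested_threads_started;
	int max_threads;		/* set via BINDER_SET_MAX_THREADS */
	bool have_waiting_threads;
	bool thread_is_looper;		/* LOOPER_REGISTERED or _ENTERED */
};

static bool should_spawn_looper(const struct looper_state *s)
{
	return s->requested_threads == 0 &&
	       !s->have_waiting_threads &&
	       s->requested_threads_started < s->max_threads &&
	       s->thread_is_looper;
}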
4867 static void binder_release_work(struct binder_proc *proc,
4874 binder_inner_proc_lock(proc);
4877 binder_inner_proc_unlock(proc);
4929 struct binder_proc *proc, struct binder_thread *new_thread)
4933 struct rb_node **p = &proc->threads.rb_node;
4950 thread->proc = proc;
4956 rb_insert_color(&thread->rb_node, &proc->threads);
4967 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4972 binder_inner_proc_lock(proc);
4973 thread = binder_get_thread_ilocked(proc, NULL);
4974 binder_inner_proc_unlock(proc);
4979 binder_inner_proc_lock(proc);
4980 thread = binder_get_thread_ilocked(proc, new_thread);
4981 binder_inner_proc_unlock(proc);
4988 static void binder_free_proc(struct binder_proc *proc)
4992 BUG_ON(!list_empty(&proc->todo));
4993 BUG_ON(!list_empty(&proc->delivered_death));
4994 if (proc->outstanding_txns)
4996 __func__, proc->outstanding_txns);
4997 device = container_of(proc->context, struct binder_device, context);
4999 kfree(proc->context->name);
5002 binder_alloc_deferred_release(&proc->alloc);
5003 put_task_struct(proc->tsk);
5004 put_cred(proc->cred);
5006 kfree(proc);
5013 binder_proc_dec_tmpref(thread->proc);
5017 static int binder_thread_release(struct binder_proc *proc,
5025 binder_inner_proc_lock(thread->proc);
5027 * take a ref on the proc so it survives
5028 * after we remove this thread from proc->threads.
5032 proc->tmp_ref++;
5038 rb_erase(&thread->rb_node, &proc->threads);
5054 proc->pid, thread->pid,
5059 thread->proc->outstanding_txns--;
5092 binder_inner_proc_unlock(thread->proc);
5106 binder_release_work(proc, &thread->todo);
5114 struct binder_proc *proc = filp->private_data;
5118 thread = binder_get_thread(proc);
5122 binder_inner_proc_lock(thread->proc);
5126 binder_inner_proc_unlock(thread->proc);
5141 struct binder_proc *proc = filp->private_data;
5157 proc->pid, thread->pid,
5162 ret = binder_thread_write(proc, thread,
5175 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5180 binder_inner_proc_lock(proc);
5181 if (!binder_worklist_empty_ilocked(&proc->todo))
5182 binder_wakeup_proc_ilocked(proc);
5183 binder_inner_proc_unlock(proc);
5192 proc->pid, thread->pid,
5207 struct binder_proc *proc = filp->private_data;
5208 struct binder_context *context = proc->context;
5218 ret = security_binder_set_context_mgr(proc->cred);
5233 new_node = binder_new_node(proc, fbo);
5251 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5255 struct binder_context *context = proc->context;
5261 proc->pid);
5268 context->binder_context_mgr_node->proc != proc) {
5274 node = binder_get_node_from_ref(proc, handle, true, NULL);
5287 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5295 binder_inner_proc_lock(proc);
5296 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5307 binder_inner_proc_unlock(proc);
5312 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5317 if (proc->outstanding_txns > 0)
5320 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5411 binder_inner_proc_lock(thread->proc);
5414 binder_inner_proc_unlock(thread->proc);
5425 struct binder_proc *proc = filp->private_data;
5431 proc->pid, current->pid, cmd, arg);*/
5433 binder_selftest_alloc(&proc->alloc);
5441 thread = binder_get_thread(proc);
5461 binder_inner_proc_lock(proc);
5462 proc->max_threads = max_threads;
5463 binder_inner_proc_unlock(proc);
5485 proc->pid, thread->pid);
5486 binder_thread_release(proc, thread);
5511 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5530 ret = binder_ioctl_get_node_debug_info(proc, &info);
5625 binder_inner_proc_lock(proc);
5626 proc->oneway_spam_detection_enabled = (bool)enable;
5627 binder_inner_proc_unlock(proc);
5657 binder_inner_proc_lock(proc);
5660 binder_inner_proc_unlock(proc);
5681 binder_inner_proc_lock(proc);
5687 binder_inner_proc_unlock(proc);
5715 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5723 struct binder_proc *proc = vma->vm_private_data;
5727 proc->pid, vma->vm_start, vma->vm_end,
5734 struct binder_proc *proc = vma->vm_private_data;
5738 proc->pid, vma->vm_start, vma->vm_end,
5741 binder_alloc_vma_close(&proc->alloc);
5757 struct binder_proc *proc = filp->private_data;
5759 if (proc->tsk != current->group_leader)
5764 __func__, proc->pid, vma->vm_start, vma->vm_end,
5770 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5776 vma->vm_private_data = proc;
5778 return binder_alloc_mmap_handler(&proc->alloc, vma);
5783 struct binder_proc *proc, *itr;
5792 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5793 if (proc == NULL)
5795 spin_lock_init(&proc->inner_lock);
5796 spin_lock_init(&proc->outer_lock);
5798 proc->tsk = current->group_leader;
5799 proc->cred = get_cred(filp->f_cred);
5800 INIT_LIST_HEAD(&proc->todo);
5801 init_waitqueue_head(&proc->freeze_wait);
5802 proc->default_priority = task_nice(current);
5813 proc->context = &binder_dev->context;
5814 binder_alloc_init(&proc->alloc);
5817 proc->pid = current->group_leader->pid;
5818 INIT_LIST_HEAD(&proc->delivered_death);
5819 INIT_LIST_HEAD(&proc->waiting_threads);
5820 filp->private_data = proc;
5824 if (itr->pid == proc->pid) {
5829 hlist_add_head(&proc->proc_node, &binder_procs);
5835 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5837 * proc debug entries are shared between contexts.
5842 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5844 (void *)(unsigned long)proc->pid,
5852 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5860 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5862 proc->binderfs_entry = binderfs_entry;
5877 struct binder_proc *proc = filp->private_data;
5879 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5884 static void binder_deferred_flush(struct binder_proc *proc)
5889 binder_inner_proc_lock(proc);
5890 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5899 binder_inner_proc_unlock(proc);
5902 "binder_flush: %d woke %d threads\n", proc->pid,
5908 struct binder_proc *proc = filp->private_data;
5910 debugfs_remove(proc->debugfs_entry);
5912 if (proc->binderfs_entry) {
5913 binderfs_remove_file(proc->binderfs_entry);
5914 proc->binderfs_entry = NULL;
5917 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5926 struct binder_proc *proc = node->proc;
5928 binder_release_work(proc, &node->async_todo);
5931 binder_inner_proc_lock(proc);
5938 binder_inner_proc_unlock(proc);
5945 node->proc = NULL;
5948 binder_inner_proc_unlock(proc);
5962 binder_inner_proc_lock(ref->proc);
5964 binder_inner_proc_unlock(ref->proc);
5973 &ref->proc->todo);
5974 binder_wakeup_proc_ilocked(ref->proc);
5975 binder_inner_proc_unlock(ref->proc);
5987 static void binder_deferred_release(struct binder_proc *proc)
5989 struct binder_context *context = proc->context;
5994 hlist_del(&proc->proc_node);
5999 context->binder_context_mgr_node->proc == proc) {
6002 __func__, proc->pid);
6006 binder_inner_proc_lock(proc);
6008 * Make sure proc stays alive after we
6011 proc->tmp_ref++;
6013 proc->is_dead = true;
6014 proc->is_frozen = false;
6015 proc->sync_recv = false;
6016 proc->async_recv = false;
6019 while ((n = rb_first(&proc->threads))) {
6023 binder_inner_proc_unlock(proc);
6025 active_transactions += binder_thread_release(proc, thread);
6026 binder_inner_proc_lock(proc);
6031 while ((n = rb_first(&proc->nodes))) {
6042 rb_erase(&node->rb_node, &proc->nodes);
6043 binder_inner_proc_unlock(proc);
6045 binder_inner_proc_lock(proc);
6047 binder_inner_proc_unlock(proc);
6050 binder_proc_lock(proc);
6051 while ((n = rb_first(&proc->refs_by_desc))) {
6057 binder_proc_unlock(proc);
6059 binder_proc_lock(proc);
6061 binder_proc_unlock(proc);
6063 binder_release_work(proc, &proc->todo);
6064 binder_release_work(proc, &proc->delivered_death);
6068 __func__, proc->pid, threads, nodes, incoming_refs,
6071 binder_proc_dec_tmpref(proc);
6076 struct binder_proc *proc;
6083 proc = hlist_entry(binder_deferred_list.first,
6085 hlist_del_init(&proc->deferred_work_node);
6086 defer = proc->deferred_work;
6087 proc->deferred_work = 0;
6089 proc = NULL;
6095 binder_deferred_flush(proc);
6098 binder_deferred_release(proc); /* frees proc */
6099 } while (proc);
6104 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6107 proc->deferred_work |= defer;
6108 if (hlist_unhashed(&proc->deferred_work_node)) {
6109 hlist_add_head(&proc->deferred_work_node,
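Hits 6104-6109 are binder_defer_work(): new deferred flags are OR-ed into proc->deferred_work, and the proc is put on the global deferred list only if it is not already queued, so repeated deferrals coalesce into a single pass of the worker. A sketch with an explicit queued flag in place of the kernel's hlist_unhashed() test:

#include <pthread.h>
#include <stdbool.h>

enum deferred_state {
	DEFER_FLUSH	= 1,
	DEFER_RELEASE	= 2,
};

struct dproc {
	struct dproc *next;		/* global deferred list linkage */
	bool queued;
	unsigned int deferred_work;	/* accumulated flag bits */
};

static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dproc *deferred_list;

static void defer_work(struct dproc *p, enum deferred_state d)
{
	pthread_mutex_lock(&deferred_lock);
	p->deferred_work |= d;		/* coalesce with pending flags */
	if (!p->queued) {		/* enqueue at most once */
		p->queued = true;
		p->next = deferred_list;
		deferred_list = p;
		/* the kernel also kicks its workqueue here */
	}
	pthread_mutex_unlock(&deferred_lock);
}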
6117 struct binder_proc *proc,
6138 if (proc != to_proc) {
6141 * correct proc inner lock for this node
6159 struct binder_proc *proc,
6171 m, proc, transaction_prefix, t);
6221 print_binder_transaction_ilocked(m, thread->proc,
6225 print_binder_transaction_ilocked(m, thread->proc,
6229 print_binder_transaction_ilocked(m, thread->proc,
6235 print_binder_work_ilocked(m, thread->proc, " ",
6259 seq_puts(m, " proc");
6261 seq_printf(m, " %d", ref->proc->pid);
6264 if (node->proc) {
6266 print_binder_work_ilocked(m, node->proc, " ",
6277 ref->node->proc ? "" : "dead ",
6284 struct binder_proc *proc, int print_all)
6292 seq_printf(m, "proc %d\n", proc->pid);
6293 seq_printf(m, "context %s\n", proc->context->name);
6296 binder_inner_proc_lock(proc);
6297 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6301 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6314 binder_inner_proc_unlock(proc);
6321 binder_inner_proc_lock(proc);
6323 binder_inner_proc_unlock(proc);
6328 binder_proc_lock(proc);
6329 for (n = rb_first(&proc->refs_by_desc);
6335 binder_proc_unlock(proc);
6337 binder_alloc_print_allocated(m, &proc->alloc);
6338 binder_inner_proc_lock(proc);
6339 list_for_each_entry(w, &proc->todo, entry)
6340 print_binder_work_ilocked(m, proc, " ",
6342 list_for_each_entry(w, &proc->delivered_death, entry) {
6346 binder_inner_proc_unlock(proc);
6398 "proc",
6450 struct binder_proc *proc)
6457 binder_alloc_get_free_async_space(&proc->alloc);
6459 seq_printf(m, "proc %d\n", proc->pid);
6460 seq_printf(m, "context %s\n", proc->context->name);
6463 binder_inner_proc_lock(proc);
6464 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6467 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6473 " free async space %zd\n", proc->requested_threads,
6474 proc->requested_threads_started, proc->max_threads,
6478 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6480 binder_inner_proc_unlock(proc);
6485 binder_proc_lock(proc);
6486 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6493 binder_proc_unlock(proc);
6496 count = binder_alloc_get_allocated_count(&proc->alloc);
6499 binder_alloc_print_pages(m, &proc->alloc);
6502 binder_inner_proc_lock(proc);
6503 list_for_each_entry(w, &proc->todo, entry) {
6507 binder_inner_proc_unlock(proc);
6510 print_binder_stats(m, " ", &proc->stats);
6515 struct binder_proc *proc;
6545 hlist_for_each_entry(proc, &binder_procs, proc_node)
6546 print_binder_proc(m, proc, 1);
6554 struct binder_proc *proc;
6561 hlist_for_each_entry(proc, &binder_procs, proc_node)
6562 print_binder_proc_stats(m, proc);
6570 struct binder_proc *proc;
6574 hlist_for_each_entry(proc, &binder_procs, proc_node)
6575 print_binder_proc(m, proc, 0);
6589 seq_puts(m, "binder proc state:\n");
6710 from_pid = t->from ? (t->from->proc ? t->from->proc->pid : 0) : t->from_pid;
6744 struct binder_proc *proc,
6754 binder_inner_proc_lock(proc);
6755 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6771 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6780 binder_inner_proc_unlock(proc);
6788 binder_inner_proc_lock(proc);
6790 binder_inner_proc_unlock(proc);
6800 struct binder_proc *proc)
6804 size_t free_async_space = binder_alloc_get_free_async_space(&proc->alloc);
6806 seq_printf(m, "%d\t", proc->pid);
6807 seq_printf(m, "%s\t", proc->context->name);
6809 binder_inner_proc_lock(proc);
6810 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6814 "\t%zd\n", proc->requested_threads,
6815 proc->requested_threads_started, proc->max_threads,
6818 binder_inner_proc_unlock(proc);
6823 struct binder_proc *proc = NULL;
6828 hlist_for_each_entry(proc, &binder_procs, proc_node)
6829 print_binder_transaction_brief(m, proc, now);
6832 hlist_for_each_entry(proc, &binder_procs, proc_node)
6833 print_binder_proc_brief(m, proc);
6894 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",