// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
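
/*
 * Illustrative sketch of the required ordering (not a verbatim call
 * site from this file): taking all three locks for a node owned by a
 * live proc follows the numbering above:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 *
 * binder_node_inner_lock() below encapsulates the 2) -> 3) step for
 * the common case.
 */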

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>
#include <linux/ktime.h>
#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
#include <linux/trace_clock.h>
#include <linux/proc_fs.h>
#endif

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
static int binder_transaction_proc_show(struct seq_file *m, void *unused);
DEFINE_PROC_SHOW_ATTRIBUTE(binder_transaction_proc);
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#ifdef CONFIG_ACCESS_TOKENID
#define ENABLE_ACCESS_TOKENID 1
#else
#define ENABLE_ACCESS_TOKENID 0
#endif /* CONFIG_ACCESS_TOKENID */

#ifdef CONFIG_BINDER_SENDER_INFO
#define ENABLE_BINDER_SENDER_INFO 1
#else
#define ENABLE_BINDER_SENDER_INFO 0
#endif /* CONFIG_BINDER_SENDER_INFO */

#define ACCESS_TOKENID_FEATURE_VALUE (ENABLE_ACCESS_TOKENID << 0)
#define BINDER_SENDER_INFO_FEATURE_VALUE (ENABLE_BINDER_SENDER_INFO << 2)

#define BINDER_CURRENT_FEATURE_SET (ACCESS_TOKENID_FEATURE_VALUE | BINDER_SENDER_INFO_FEATURE_VALUE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
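
/*
 * Since debug_mask is exposed as a 0644 module parameter, the mask can
 * also be changed at runtime, e.g. (assuming the driver is built in
 * under the name "binder"):
 *
 *	echo 0x3ff > /sys/module/binder/parameters/debug_mask
 */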

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)

static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}

#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
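
/*
 * Illustrative use (a sketch, not a verbatim call site from this
 * file):
 *
 *	binder_set_extended_error(&thread->ee, t->debug_id,
 *				  BR_DEAD_REPLY, -ECONNREFUSED);
 */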

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
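
/*
 * Readers of the log pair with the smp_wmb() above: they are expected
 * to READ_ONCE(e->debug_id_done) and issue smp_rmb() before examining
 * the other fields (see the log printing code later in this file).
 */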

enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);

	/* (e)poll-based threads require an explicit wakeup signal when
	 * queuing their own work; they rely on these events to consume
	 * messages without blocking on I/O. Without it, threads risk
	 * waiting indefinitely without handling the work.
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
	    thread->pid == current->pid && !thread->process_todo)
		wake_up_interruptible_sync(&thread->wait);

	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
static inline u64 binder_clock(void)
{
#ifdef CONFIG_TRACE_CLOCK
	return trace_clock_local();
#else
	return 0;
#endif
}
#endif

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 * returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
					struct binder_proc *proc,
					struct binder_node *new_node,
					struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
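
/*
 * Typical usage of the lookup/put pair (a sketch, not a verbatim call
 * site): binder_get_node() returns the node with an implicit temporary
 * reference that the caller must drop via binder_put_node():
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */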

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 * allocated/initialized the ref first in which case the
 * returned ref would be different than the passed-in
 * new_ref. new_ref must be kfree'd by the caller in
 * this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 while a tmp_ref is held will not
		 * decrement the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * The atomic protects the counter since it is updated without
	 * the inner lock held; the thread is only freed once it is dead
	 * and the counter has dropped to zero under the lock.
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
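
/*
 * Sketch of the expected pairing (see binder_send_failed_reply() for a
 * real caller via binder_get_txn_from_and_acq_inner()):
 *
 *	from = binder_get_txn_from(t);
 *	if (from) {
 *		... use from ...
 *		binder_thread_dec_tmpref(from);
 *	}
 */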

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t: binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t: binder transaction with fd fixups to free
1581 *
1582 * If the transaction is being torn down prior to being
1583 * processed by the target process, free all of the
1584 * fd fixups and fput the file structs. It is safe to
1585 * call this function after the fixups have been
1586 * processed -- in that case, the list will be empty.
1587 */
binder_free_txn_fixups(struct binder_transaction *t)1588 static void binder_free_txn_fixups(struct binder_transaction *t)
1589 {
1590 struct binder_txn_fd_fixup *fixup, *tmp;
1591
1592 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1593 fput(fixup->file);
1594 if (fixup->target_fd >= 0)
1595 put_unused_fd(fixup->target_fd);
1596 list_del(&fixup->fixup_entry);
1597 kfree(fixup);
1598 }
1599 }
1600
binder_txn_latency_free(struct binder_transaction *t)1601 static void binder_txn_latency_free(struct binder_transaction *t)
1602 {
1603 int from_proc, from_thread, to_proc, to_thread;
1604
1605 spin_lock(&t->lock);
1606 from_proc = t->from ? t->from->proc->pid : 0;
1607 from_thread = t->from ? t->from->pid : 0;
1608 to_proc = t->to_proc ? t->to_proc->pid : 0;
1609 to_thread = t->to_thread ? t->to_thread->pid : 0;
1610 spin_unlock(&t->lock);
1611
1612 trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1613 }
1614
binder_free_transaction(struct binder_transaction *t)1615 static void binder_free_transaction(struct binder_transaction *t)
1616 {
1617 struct binder_proc *target_proc = t->to_proc;
1618
1619 if (target_proc) {
1620 binder_inner_proc_lock(target_proc);
1621 target_proc->outstanding_txns--;
1622 if (target_proc->outstanding_txns < 0)
1623 pr_warn("%s: Unexpected outstanding_txns %d\n",
1624 __func__, target_proc->outstanding_txns);
1625 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1626 wake_up_interruptible_all(&target_proc->freeze_wait);
1627 if (t->buffer)
1628 t->buffer->transaction = NULL;
1629 binder_inner_proc_unlock(target_proc);
1630 }
1631 if (trace_binder_txn_latency_free_enabled())
1632 binder_txn_latency_free(t);
1633 /*
1634 * If the transaction has no target_proc, then
1635 * t->buffer->transaction has already been cleared.
1636 */
1637 binder_free_txn_fixups(t);
1638 kfree(t);
1639 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1640 }
1641
binder_send_failed_reply(struct binder_transaction *t, uint32_t error_code)1642 static void binder_send_failed_reply(struct binder_transaction *t,
1643 uint32_t error_code)
1644 {
1645 struct binder_thread *target_thread;
1646 struct binder_transaction *next;
1647
1648 BUG_ON(t->flags & TF_ONE_WAY);
1649 while (1) {
1650 target_thread = binder_get_txn_from_and_acq_inner(t);
1651 if (target_thread) {
1652 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1653 "send failed reply for transaction %d to %d:%d\n",
1654 t->debug_id,
1655 target_thread->proc->pid,
1656 target_thread->pid);
1657
1658 binder_pop_transaction_ilocked(target_thread, t);
1659 if (target_thread->reply_error.cmd == BR_OK) {
1660 target_thread->reply_error.cmd = error_code;
1661 binder_enqueue_thread_work_ilocked(
1662 target_thread,
1663 &target_thread->reply_error.work);
1664 wake_up_interruptible(&target_thread->wait);
1665 } else {
1666 /*
1667 * Cannot get here for normal operation, but
1668 * we can if multiple synchronous transactions
1669 * are sent without blocking for responses.
1670 * Just ignore the 2nd error in this case.
1671 */
1672 pr_warn("Unexpected reply error: %u\n",
1673 target_thread->reply_error.cmd);
1674 }
1675 binder_inner_proc_unlock(target_thread->proc);
1676 binder_thread_dec_tmpref(target_thread);
1677 binder_free_transaction(t);
1678 return;
1679 }
1680 __release(&target_thread->proc->inner_lock);
1681 next = t->from_parent;
1682
1683 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1684 "send failed reply for transaction %d, target dead\n",
1685 t->debug_id);
1686
1687 binder_free_transaction(t);
1688 if (next == NULL) {
1689 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1690 "reply failed, no target thread at root\n");
1691 return;
1692 }
1693 t = next;
1694 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1695 "reply failed, no target thread -- retry %d\n",
1696 t->debug_id);
1697 }
1698 }
1699
1700 /**
1701 * binder_cleanup_transaction() - cleans up undelivered transaction
1702 * @t: transaction that needs to be cleaned up
1703 * @reason: reason the transaction wasn't delivered
1704 * @error_code: error to return to caller (if synchronous call)
1705 */
binder_cleanup_transaction(struct binder_transaction *t, const char *reason, uint32_t error_code)1706 static void binder_cleanup_transaction(struct binder_transaction *t,
1707 const char *reason,
1708 uint32_t error_code)
1709 {
1710 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1711 binder_send_failed_reply(t, error_code);
1712 } else {
1713 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1714 "undelivered transaction %d, %s\n",
1715 t->debug_id, reason);
1716 binder_free_transaction(t);
1717 }
1718 }
1719
1720 /**
1721 * binder_get_object() - gets object and checks for valid metadata
1722 * @proc: binder_proc owning the buffer
1723 * @u: sender's user pointer to base of buffer
1724 * @buffer: binder_buffer that we're parsing.
1725 * @offset: offset in the @buffer at which to validate an object.
1726 * @object: struct binder_object to read into
1727 *
1728 * Copy the binder object at the given offset into @object. If @u is
1729 * provided then the copy is from the sender's buffer. If not, then
1730 * it is copied from the target's @buffer.
1731 *
1732 * Return: If there's a valid metadata object at @offset, the
1733 * size of that object. Otherwise, it returns zero. The object
1734 * is read into the struct binder_object pointed to by @object.
1735 */
1736 static size_t binder_get_object(struct binder_proc *proc,
1737 const void __user *u,
1738 struct binder_buffer *buffer,
1739 unsigned long offset,
1740 struct binder_object *object)
1741 {
1742 size_t read_size;
1743 struct binder_object_header *hdr;
1744 size_t object_size = 0;
1745
1746 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1747 if (offset > buffer->data_size || read_size < sizeof(*hdr))
1748 return 0;
1749 if (u) {
1750 if (copy_from_user(object, u + offset, read_size))
1751 return 0;
1752 } else {
1753 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1754 offset, read_size))
1755 return 0;
1756 }
1757
1758 /* Ok, now see if we read a complete object. */
1759 hdr = &object->hdr;
1760 switch (hdr->type) {
1761 case BINDER_TYPE_BINDER:
1762 case BINDER_TYPE_WEAK_BINDER:
1763 case BINDER_TYPE_HANDLE:
1764 case BINDER_TYPE_WEAK_HANDLE:
1765 object_size = sizeof(struct flat_binder_object);
1766 break;
1767 case BINDER_TYPE_FD:
1768 object_size = sizeof(struct binder_fd_object);
1769 break;
1770 case BINDER_TYPE_PTR:
1771 object_size = sizeof(struct binder_buffer_object);
1772 break;
1773 case BINDER_TYPE_FDA:
1774 object_size = sizeof(struct binder_fd_array_object);
1775 break;
1776 default:
1777 return 0;
1778 }
1779 if (offset <= buffer->data_size - object_size &&
1780 buffer->data_size >= object_size)
1781 return object_size;
1782 else
1783 return 0;
1784 }
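
/*
 * Usage sketch (illustrative, simplified from the validation loops later
 * in this file): callers read an entry from the offset array, then let
 * binder_get_object() bounds-check and type-check the object it names.
 *
 *	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
 *					  buffer, buffer_offset,
 *					  sizeof(object_offset)))
 *		-> fail;
 *	object_size = binder_get_object(proc, u, buffer, object_offset,
 *					&object);
 *	if (object_size == 0)
 *		-> invalid metadata object, reject;
 */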
1785
1786 /**
1787 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1788 * @proc: binder_proc owning the buffer
1789 * @b: binder_buffer containing the object
1790 * @object: struct binder_object to read into
1791 * @index: index in offset array at which the binder_buffer_object is
1792 * located
1793 * @start_offset: points to the start of the offset array
1794 * @object_offsetp: offset of @object read from @b
1795 * @num_valid: the number of valid offsets in the offset array
1796 *
1797 * Return: If @index is within the valid range of the offset array
1798 * described by @start_offset and @num_valid, and if there's a valid
1799 * binder_buffer_object at the offset found in index @index
1800 * of the offset array, that object is returned. Otherwise,
1801 * %NULL is returned.
1802 * Note that the offset found in index @index itself is not
1803 * verified; this function assumes that @num_valid elements
1804 * from @start_offset were previously verified to have valid offsets.
1805 * If @object_offsetp is non-NULL, then the offset within
1806 * @b is written to it.
1807 */
1808 static struct binder_buffer_object *binder_validate_ptr(
1809 struct binder_proc *proc,
1810 struct binder_buffer *b,
1811 struct binder_object *object,
1812 binder_size_t index,
1813 binder_size_t start_offset,
1814 binder_size_t *object_offsetp,
1815 binder_size_t num_valid)
1816 {
1817 size_t object_size;
1818 binder_size_t object_offset;
1819 unsigned long buffer_offset;
1820
1821 if (index >= num_valid)
1822 return NULL;
1823
1824 buffer_offset = start_offset + sizeof(binder_size_t) * index;
1825 if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1826 b, buffer_offset,
1827 sizeof(object_offset)))
1828 return NULL;
1829 object_size = binder_get_object(proc, NULL, b, object_offset, object);
1830 if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1831 return NULL;
1832 if (object_offsetp)
1833 *object_offsetp = object_offset;
1834
1835 return &object->bbo;
1836 }
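
/*
 * Usage sketch (illustrative): BINDER_TYPE_FDA processing resolves its
 * parent by index into the already-verified prefix of the offset array:
 *
 *	parent = binder_validate_ptr(proc, buffer, &ptr_object, fda->parent,
 *				     off_start_offset, NULL, num_valid);
 *	if (!parent)
 *		-> index out of range, or not a BINDER_TYPE_PTR object;
 */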
1837
1838 /**
1839 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1840 * @proc: binder_proc owning the buffer
1841 * @b: transaction buffer
1842 * @objects_start_offset: offset to start of objects buffer
1843 * @buffer_obj_offset: offset to binder_buffer_object in which to fix up
1844 * @fixup_offset: start offset in @buffer to fix up
1845 * @last_obj_offset: offset to last binder_buffer_object that we fixed
1846 * @last_min_offset: minimum fixup offset in object at @last_obj_offset
1847 *
1848 * Return: %true if a fixup in buffer @b at offset @fixup_offset is
1849 * allowed.
1850 *
1851 * For safety reasons, we only allow fixups inside a buffer to happen
1852 * at increasing offsets; additionally, we only allow fixup on the last
1853 * buffer object that was verified, or one of its parents.
1854 *
1855 * Example of what is allowed:
1856 *
1857 * A
1858 * B (parent = A, offset = 0)
1859 * C (parent = A, offset = 16)
1860 * D (parent = C, offset = 0)
1861 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1862 *
1863 * Examples of what is not allowed:
1864 *
1865 * Decreasing offsets within the same parent:
1866 * A
1867 * C (parent = A, offset = 16)
1868 * B (parent = A, offset = 0) // decreasing offset within A
1869 *
1870 * Referring to a parent that wasn't the last object or any of its parents:
1871 * A
1872 * B (parent = A, offset = 0)
1873 * C (parent = A, offset = 0)
1874 * C (parent = A, offset = 16)
1875 * D (parent = B, offset = 0) // B is not A or any of A's parents
1876 */
1877 static bool binder_validate_fixup(struct binder_proc *proc,
1878 struct binder_buffer *b,
1879 binder_size_t objects_start_offset,
1880 binder_size_t buffer_obj_offset,
1881 binder_size_t fixup_offset,
1882 binder_size_t last_obj_offset,
1883 binder_size_t last_min_offset)
1884 {
1885 if (!last_obj_offset) {
1886 /* Nothing to fix up */
1887 return false;
1888 }
1889
1890 while (last_obj_offset != buffer_obj_offset) {
1891 unsigned long buffer_offset;
1892 struct binder_object last_object;
1893 struct binder_buffer_object *last_bbo;
1894 size_t object_size = binder_get_object(proc, NULL, b,
1895 last_obj_offset,
1896 &last_object);
1897 if (object_size != sizeof(*last_bbo))
1898 return false;
1899
1900 last_bbo = &last_object.bbo;
1901 /*
1902 * Safe to retrieve the parent of last_obj, since it
1903 * was already previously verified by the driver.
1904 */
1905 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1906 return false;
1907 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1908 buffer_offset = objects_start_offset +
1909 sizeof(binder_size_t) * last_bbo->parent;
1910 if (binder_alloc_copy_from_buffer(&proc->alloc,
1911 &last_obj_offset,
1912 b, buffer_offset,
1913 sizeof(last_obj_offset)))
1914 return false;
1915 }
1916 return (fixup_offset >= last_min_offset);
1917 }
1918
1919 /**
1920 * struct binder_task_work_cb - for deferred close
1921 *
1922 * @twork: callback_head for task work
1923 * @file: file to close
1924 *
1925 * Structure to pass task work to be handled after
1926 * returning from binder_ioctl() via task_work_add().
1927 */
1928 struct binder_task_work_cb {
1929 struct callback_head twork;
1930 struct file *file;
1931 };
1932
1933 /**
1934 * binder_do_fd_close() - close the file scheduled for deferred close
1935 * @twork: callback head for task work
1936 *
1937 * It is not safe to call ksys_close() during the binder_ioctl()
1938 * function if there is a chance that binder's own file descriptor
1939 * might be closed. This is to meet the requirements for using
1940 * fdget() (see comments for __fget_light()). Therefore use
1941 * task_work_add() to schedule the close operation once we have
1942 * returned from binder_ioctl(). This function is the callback
1943 * for that mechanism and performs the actual fput() on the
1944 * given file.
1945 */
1946 static void binder_do_fd_close(struct callback_head *twork)
1947 {
1948 struct binder_task_work_cb *twcb = container_of(twork,
1949 struct binder_task_work_cb, twork);
1950
1951 fput(twcb->file);
1952 kfree(twcb);
1953 }
1954
1955 /**
1956 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1957 * @fd: file-descriptor to close
1958 *
1959 * See comments in binder_do_fd_close(). This function is used to schedule
1960 * a file-descriptor to be closed after returning from binder_ioctl().
1961 */
1962 static void binder_deferred_fd_close(int fd)
1963 {
1964 struct binder_task_work_cb *twcb;
1965
1966 twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1967 if (!twcb)
1968 return;
1969 init_task_work(&twcb->twork, binder_do_fd_close);
1970 twcb->file = close_fd_get_file(fd);
1971 if (twcb->file) {
1972 // pin it until binder_do_fd_close(); see comments there
1973 get_file(twcb->file);
1974 filp_close(twcb->file, current->files);
1975 task_work_add(current, &twcb->twork, TWA_RESUME);
1976 } else {
1977 kfree(twcb);
1978 }
1979 }
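
/*
 * Lifecycle sketch (illustrative): the close is split across the return
 * path from binder_ioctl():
 *
 *	binder_deferred_fd_close(fd)		ioctl context
 *	    close_fd_get_file(fd)		detach fd from the fd table
 *	    get_file() + filp_close()		keep one pinning reference
 *	    task_work_add(..., TWA_RESUME)	defer the rest
 *	binder_do_fd_close()			on return to user space
 *	    fput(twcb->file)			drop the pinning reference
 */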
1980
1981 static void binder_transaction_buffer_release(struct binder_proc *proc,
1982 struct binder_thread *thread,
1983 struct binder_buffer *buffer,
1984 binder_size_t off_end_offset,
1985 bool is_failure)
1986 {
1987 int debug_id = buffer->debug_id;
1988 binder_size_t off_start_offset, buffer_offset;
1989
1990 binder_debug(BINDER_DEBUG_TRANSACTION,
1991 "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1992 proc->pid, buffer->debug_id,
1993 buffer->data_size, buffer->offsets_size,
1994 (unsigned long long)off_end_offset);
1995
1996 if (buffer->target_node)
1997 binder_dec_node(buffer->target_node, 1, 0);
1998
1999 off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2000
2001 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2002 buffer_offset += sizeof(binder_size_t)) {
2003 struct binder_object_header *hdr;
2004 size_t object_size = 0;
2005 struct binder_object object;
2006 binder_size_t object_offset;
2007
2008 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2009 buffer, buffer_offset,
2010 sizeof(object_offset)))
2011 object_size = binder_get_object(proc, NULL, buffer,
2012 object_offset, &object);
2013 if (object_size == 0) {
2014 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2015 debug_id, (u64)object_offset, buffer->data_size);
2016 continue;
2017 }
2018 hdr = &object.hdr;
2019 switch (hdr->type) {
2020 case BINDER_TYPE_BINDER:
2021 case BINDER_TYPE_WEAK_BINDER: {
2022 struct flat_binder_object *fp;
2023 struct binder_node *node;
2024
2025 fp = to_flat_binder_object(hdr);
2026 node = binder_get_node(proc, fp->binder);
2027 if (node == NULL) {
2028 pr_err("transaction release %d bad node %016llx\n",
2029 debug_id, (u64)fp->binder);
2030 break;
2031 }
2032 binder_debug(BINDER_DEBUG_TRANSACTION,
2033 " node %d u%016llx\n",
2034 node->debug_id, (u64)node->ptr);
2035 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2036 0);
2037 binder_put_node(node);
2038 } break;
2039 case BINDER_TYPE_HANDLE:
2040 case BINDER_TYPE_WEAK_HANDLE: {
2041 struct flat_binder_object *fp;
2042 struct binder_ref_data rdata;
2043 int ret;
2044
2045 fp = to_flat_binder_object(hdr);
2046 ret = binder_dec_ref_for_handle(proc, fp->handle,
2047 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2048
2049 if (ret) {
2050 pr_err("transaction release %d bad handle %d, ret = %d\n",
2051 debug_id, fp->handle, ret);
2052 break;
2053 }
2054 binder_debug(BINDER_DEBUG_TRANSACTION,
2055 " ref %d desc %d\n",
2056 rdata.debug_id, rdata.desc);
2057 } break;
2058
2059 case BINDER_TYPE_FD: {
2060 /*
2061 * No need to close the file here since user-space
2062 * closes it for successfully delivered
2063 * transactions. For transactions that weren't
2064 * delivered, the new fd was never allocated so
2065 * there is no need to close and the fput on the
2066 * file is done when the transaction is torn
2067 * down.
2068 */
2069 } break;
2070 case BINDER_TYPE_PTR:
2071 /*
2072 * Nothing to do here, this will get cleaned up when the
2073 * transaction buffer gets freed
2074 */
2075 break;
2076 case BINDER_TYPE_FDA: {
2077 struct binder_fd_array_object *fda;
2078 struct binder_buffer_object *parent;
2079 struct binder_object ptr_object;
2080 binder_size_t fda_offset;
2081 size_t fd_index;
2082 binder_size_t fd_buf_size;
2083 binder_size_t num_valid;
2084
2085 if (is_failure) {
2086 /*
2087 * The fd fixups have not been applied so no
2088 * fds need to be closed.
2089 */
2090 continue;
2091 }
2092
2093 num_valid = (buffer_offset - off_start_offset) /
2094 sizeof(binder_size_t);
2095 fda = to_binder_fd_array_object(hdr);
2096 parent = binder_validate_ptr(proc, buffer, &ptr_object,
2097 fda->parent,
2098 off_start_offset,
2099 NULL,
2100 num_valid);
2101 if (!parent) {
2102 pr_err("transaction release %d bad parent offset\n",
2103 debug_id);
2104 continue;
2105 }
2106 fd_buf_size = sizeof(u32) * fda->num_fds;
2107 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2108 pr_err("transaction release %d invalid number of fds (%lld)\n",
2109 debug_id, (u64)fda->num_fds);
2110 continue;
2111 }
2112 if (fd_buf_size > parent->length ||
2113 fda->parent_offset > parent->length - fd_buf_size) {
2114 /* No space for all file descriptors here. */
2115 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2116 debug_id, (u64)fda->num_fds);
2117 continue;
2118 }
2119 /*
2120 * the source data for binder_buffer_object is visible
2121 * to user-space and the @buffer element is the user
2122 * pointer to the buffer_object containing the fd_array.
2123 * Convert the address to an offset relative to
2124 * the base of the transaction buffer.
2125 */
2126 fda_offset =
2127 (parent->buffer - (uintptr_t)buffer->user_data) +
2128 fda->parent_offset;
2129 for (fd_index = 0; fd_index < fda->num_fds;
2130 fd_index++) {
2131 u32 fd;
2132 int err;
2133 binder_size_t offset = fda_offset +
2134 fd_index * sizeof(fd);
2135
2136 err = binder_alloc_copy_from_buffer(
2137 &proc->alloc, &fd, buffer,
2138 offset, sizeof(fd));
2139 WARN_ON(err);
2140 if (!err) {
2141 binder_deferred_fd_close(fd);
2142 /*
2143 * Need to make sure the thread goes
2144 * back to userspace to complete the
2145 * deferred close
2146 */
2147 if (thread)
2148 thread->looper_need_return = true;
2149 }
2150 }
2151 } break;
2152 default:
2153 pr_err("transaction release %d bad object type %x\n",
2154 debug_id, hdr->type);
2155 break;
2156 }
2157 }
2158 }
2159
2160 /* Clean up all the objects in the buffer */
2161 static inline void binder_release_entire_buffer(struct binder_proc *proc,
2162 struct binder_thread *thread,
2163 struct binder_buffer *buffer,
2164 bool is_failure)
2165 {
2166 binder_size_t off_end_offset;
2167
2168 off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2169 off_end_offset += buffer->offsets_size;
2170
2171 binder_transaction_buffer_release(proc, thread, buffer,
2172 off_end_offset, is_failure);
2173 }
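
/*
 * Buffer layout sketch (informational, derived from the offset math above
 * and in binder_transaction()):
 *
 *	+--------------------------+ 0
 *	| transaction data         |  data_size bytes
 *	+--------------------------+ ALIGN(data_size, sizeof(void *))
 *	| offsets array            |  off_start_offset..off_end_offset
 *	+--------------------------+ ALIGN(off_end_offset, sizeof(void *))
 *	| sg buffers (and secctx)  |  extra_buffers_size bytes
 *	+--------------------------+
 */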
2174
2175 static int binder_translate_binder(struct flat_binder_object *fp,
2176 struct binder_transaction *t,
2177 struct binder_thread *thread)
2178 {
2179 struct binder_node *node;
2180 struct binder_proc *proc = thread->proc;
2181 struct binder_proc *target_proc = t->to_proc;
2182 struct binder_ref_data rdata;
2183 int ret = 0;
2184
2185 node = binder_get_node(proc, fp->binder);
2186 if (!node) {
2187 node = binder_new_node(proc, fp);
2188 if (!node)
2189 return -ENOMEM;
2190 }
2191 if (fp->cookie != node->cookie) {
2192 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2193 proc->pid, thread->pid, (u64)fp->binder,
2194 node->debug_id, (u64)fp->cookie,
2195 (u64)node->cookie);
2196 ret = -EINVAL;
2197 goto done;
2198 }
2199 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2200 ret = -EPERM;
2201 goto done;
2202 }
2203
2204 ret = binder_inc_ref_for_node(target_proc, node,
2205 fp->hdr.type == BINDER_TYPE_BINDER,
2206 &thread->todo, &rdata);
2207 if (ret)
2208 goto done;
2209
2210 if (fp->hdr.type == BINDER_TYPE_BINDER)
2211 fp->hdr.type = BINDER_TYPE_HANDLE;
2212 else
2213 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2214 fp->binder = 0;
2215 fp->handle = rdata.desc;
2216 fp->cookie = 0;
2217
2218 trace_binder_transaction_node_to_ref(t, node, &rdata);
2219 binder_debug(BINDER_DEBUG_TRANSACTION,
2220 " node %d u%016llx -> ref %d desc %d\n",
2221 node->debug_id, (u64)node->ptr,
2222 rdata.debug_id, rdata.desc);
2223 done:
2224 binder_put_node(node);
2225 return ret;
2226 }
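
/*
 * Effect on the object (illustrative): a node leaving its owning process
 * is rewritten into a handle that is only meaningful in the target:
 *
 *	before:	hdr.type = BINDER_TYPE_BINDER (or WEAK_BINDER)
 *		binder = <node ptr>, cookie = <node cookie>
 *	after:	hdr.type = BINDER_TYPE_HANDLE (or WEAK_HANDLE)
 *		handle = rdata.desc, binder = 0, cookie = 0
 */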
2227
2228 static int binder_translate_handle(struct flat_binder_object *fp,
2229 struct binder_transaction *t,
2230 struct binder_thread *thread)
2231 {
2232 struct binder_proc *proc = thread->proc;
2233 struct binder_proc *target_proc = t->to_proc;
2234 struct binder_node *node;
2235 struct binder_ref_data src_rdata;
2236 int ret = 0;
2237
2238 node = binder_get_node_from_ref(proc, fp->handle,
2239 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2240 if (!node) {
2241 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2242 proc->pid, thread->pid, fp->handle);
2243 return -EINVAL;
2244 }
2245 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2246 ret = -EPERM;
2247 goto done;
2248 }
2249
2250 binder_node_lock(node);
2251 if (node->proc == target_proc) {
2252 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2253 fp->hdr.type = BINDER_TYPE_BINDER;
2254 else
2255 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2256 fp->binder = node->ptr;
2257 fp->cookie = node->cookie;
2258 if (node->proc)
2259 binder_inner_proc_lock(node->proc);
2260 else
2261 __acquire(&node->proc->inner_lock);
2262 binder_inc_node_nilocked(node,
2263 fp->hdr.type == BINDER_TYPE_BINDER,
2264 0, NULL);
2265 if (node->proc)
2266 binder_inner_proc_unlock(node->proc);
2267 else
2268 __release(&node->proc->inner_lock);
2269 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2270 binder_debug(BINDER_DEBUG_TRANSACTION,
2271 " ref %d desc %d -> node %d u%016llx\n",
2272 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2273 (u64)node->ptr);
2274 binder_node_unlock(node);
2275 } else {
2276 struct binder_ref_data dest_rdata;
2277
2278 binder_node_unlock(node);
2279 ret = binder_inc_ref_for_node(target_proc, node,
2280 fp->hdr.type == BINDER_TYPE_HANDLE,
2281 NULL, &dest_rdata);
2282 if (ret)
2283 goto done;
2284
2285 fp->binder = 0;
2286 fp->handle = dest_rdata.desc;
2287 fp->cookie = 0;
2288 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2289 &dest_rdata);
2290 binder_debug(BINDER_DEBUG_TRANSACTION,
2291 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2292 src_rdata.debug_id, src_rdata.desc,
2293 dest_rdata.debug_id, dest_rdata.desc,
2294 node->debug_id);
2295 }
2296 done:
2297 binder_put_node(node);
2298 return ret;
2299 }
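
/*
 * Effect on the object (illustrative): the two cases above.
 *
 *	handle returning to the node's owner (node->proc == target_proc):
 *		HANDLE -> BINDER; binder/cookie restored from the node
 *	handle crossing to a third process:
 *		HANDLE -> HANDLE; handle rewritten to a desc valid there
 */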
2300
2301 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2302 struct binder_transaction *t,
2303 struct binder_thread *thread,
2304 struct binder_transaction *in_reply_to)
2305 {
2306 struct binder_proc *proc = thread->proc;
2307 struct binder_proc *target_proc = t->to_proc;
2308 struct binder_txn_fd_fixup *fixup;
2309 struct file *file;
2310 int ret = 0;
2311 bool target_allows_fd;
2312
2313 if (in_reply_to)
2314 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2315 else
2316 target_allows_fd = t->buffer->target_node->accept_fds;
2317 if (!target_allows_fd) {
2318 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2319 proc->pid, thread->pid,
2320 in_reply_to ? "reply" : "transaction",
2321 fd);
2322 ret = -EPERM;
2323 goto err_fd_not_accepted;
2324 }
2325
2326 file = fget(fd);
2327 if (!file) {
2328 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2329 proc->pid, thread->pid, fd);
2330 ret = -EBADF;
2331 goto err_fget;
2332 }
2333 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2334 if (ret < 0) {
2335 ret = -EPERM;
2336 goto err_security;
2337 }
2338
2339 /*
2340 * Add fixup record for this transaction. The allocation
2341 * of the fd in the target needs to be done from a
2342 * target thread.
2343 */
2344 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2345 if (!fixup) {
2346 ret = -ENOMEM;
2347 goto err_alloc;
2348 }
2349 fixup->file = file;
2350 fixup->offset = fd_offset;
2351 fixup->target_fd = -1;
2352 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2353 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2354
2355 return ret;
2356
2357 err_alloc:
2358 err_security:
2359 fput(file);
2360 err_fget:
2361 err_fd_not_accepted:
2362 return ret;
2363 }
2364
2365 /**
2366 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2367 * @offset: offset in target buffer to fixup
2368 * @skip_size: bytes to skip in copy (fixup will be written later)
2369 * @fixup_data: data to write at fixup offset
2370 * @node: list node
2371 *
2372 * This is used for the pointer fixup list (pf) which is created and consumed
2373 * during binder_transaction() and is only accessed locally. No
2374 * locking is necessary.
2375 *
2376 * The list is ordered by @offset.
2377 */
2378 struct binder_ptr_fixup {
2379 binder_size_t offset;
2380 size_t skip_size;
2381 binder_uintptr_t fixup_data;
2382 struct list_head node;
2383 };
2384
2385 /**
2386 * struct binder_sg_copy - scatter-gather data to be copied
2387 * @offset: offset in target buffer
2388 * @sender_uaddr: user address in source buffer
2389 * @length: bytes to copy
2390 * @node: list node
2391 *
2392 * This is used for the sg copy list (sgc) which is created and consumed
2393 * during binder_transaction() and is only accessed locally. No
2394 * locking is necessary.
2395 *
2396 * The list is ordered by @offset.
2397 */
2398 struct binder_sg_copy {
2399 binder_size_t offset;
2400 const void __user *sender_uaddr;
2401 size_t length;
2402 struct list_head node;
2403 };
2404
2405 /**
2406 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2407 * @alloc: binder_alloc associated with @buffer
2408 * @buffer: binder buffer in target process
2409 * @sgc_head: list_head of scatter-gather copy list
2410 * @pf_head: list_head of pointer fixup list
2411 *
2412 * Processes all elements of @sgc_head, applying fixups from @pf_head
2413 * and copying the scatter-gather data from the source process' user
2414 * buffer to the target's buffer. It is expected that the list creation
2415 * and processing all occurs during binder_transaction() so these lists
2416 * are only accessed in local context.
2417 *
2418 * Return: 0=success, else -errno
2419 */
2420 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2421 struct binder_buffer *buffer,
2422 struct list_head *sgc_head,
2423 struct list_head *pf_head)
2424 {
2425 int ret = 0;
2426 struct binder_sg_copy *sgc, *tmpsgc;
2427 struct binder_ptr_fixup *tmppf;
2428 struct binder_ptr_fixup *pf =
2429 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2430 node);
2431
2432 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2433 size_t bytes_copied = 0;
2434
2435 while (bytes_copied < sgc->length) {
2436 size_t copy_size;
2437 size_t bytes_left = sgc->length - bytes_copied;
2438 size_t offset = sgc->offset + bytes_copied;
2439
2440 /*
2441 * We copy up to the fixup (pointed to by pf)
2442 */
2443 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2444 : bytes_left;
2445 if (!ret && copy_size)
2446 ret = binder_alloc_copy_user_to_buffer(
2447 alloc, buffer,
2448 offset,
2449 sgc->sender_uaddr + bytes_copied,
2450 copy_size);
2451 bytes_copied += copy_size;
2452 if (copy_size != bytes_left) {
2453 BUG_ON(!pf);
2454 /* we stopped at a fixup offset */
2455 if (pf->skip_size) {
2456 /*
2457 * we are just skipping. This is for
2458 * BINDER_TYPE_FDA where the translated
2459 * fds will be fixed up when we get
2460 * to target context.
2461 */
2462 bytes_copied += pf->skip_size;
2463 } else {
2464 /* apply the fixup indicated by pf */
2465 if (!ret)
2466 ret = binder_alloc_copy_to_buffer(
2467 alloc, buffer,
2468 pf->offset,
2469 &pf->fixup_data,
2470 sizeof(pf->fixup_data));
2471 bytes_copied += sizeof(pf->fixup_data);
2472 }
2473 list_del(&pf->node);
2474 kfree(pf);
2475 pf = list_first_entry_or_null(pf_head,
2476 struct binder_ptr_fixup, node);
2477 }
2478 }
2479 list_del(&sgc->node);
2480 kfree(sgc);
2481 }
2482 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2483 BUG_ON(pf->skip_size == 0);
2484 list_del(&pf->node);
2485 kfree(pf);
2486 }
2487 BUG_ON(!list_empty(sgc_head));
2488
2489 return ret > 0 ? -EINVAL : ret;
2490 }
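
/*
 * Worked example (illustrative): one 32-byte sg block at offset 0 with a
 * single 8-byte pointer fixup queued at offset 16 (skip_size == 0):
 *
 *	copy [0, 16) from the sender	(copy_size capped at pf->offset)
 *	write fixup_data to [16, 24)	(binder_alloc_copy_to_buffer)
 *	copy [24, 32) from the sender	(pf list now empty)
 *
 * A BINDER_TYPE_FDA fixup (skip_size > 0) instead leaves its region
 * uncopied here; the translated fds are patched in target context.
 */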
2491
2492 /**
2493 * binder_cleanup_deferred_txn_lists() - free specified lists
2494 * @sgc_head: list_head of scatter-gather copy list
2495 * @pf_head: list_head of pointer fixup list
2496 *
2497 * Called to clean up @sgc_head and @pf_head if there is an
2498 * error.
2499 */
2500 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2501 struct list_head *pf_head)
2502 {
2503 struct binder_sg_copy *sgc, *tmpsgc;
2504 struct binder_ptr_fixup *pf, *tmppf;
2505
2506 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2507 list_del(&sgc->node);
2508 kfree(sgc);
2509 }
2510 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2511 list_del(&pf->node);
2512 kfree(pf);
2513 }
2514 }
2515
2516 /**
2517 * binder_defer_copy() - queue a scatter-gather buffer for copy
2518 * @sgc_head: list_head of scatter-gather copy list
2519 * @offset: binder buffer offset in target process
2520 * @sender_uaddr: user address in source process
2521 * @length: bytes to copy
2522 *
2523 * Specify a scatter-gather block to be copied. The actual copy must
2524 * be deferred until all the needed fixups are identified and queued.
2525 * Then the copy and fixups are done together so un-translated values
2526 * from the source are never visible in the target buffer.
2527 *
2528 * We are guaranteed that repeated calls to this function will have
2529 * monotonically increasing @offset values so the list will naturally
2530 * be ordered.
2531 *
2532 * Return: 0=success, else -errno
2533 */
2534 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2535 const void __user *sender_uaddr, size_t length)
2536 {
2537 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2538
2539 if (!bc)
2540 return -ENOMEM;
2541
2542 bc->offset = offset;
2543 bc->sender_uaddr = sender_uaddr;
2544 bc->length = length;
2545 INIT_LIST_HEAD(&bc->node);
2546
2547 /*
2548 * We are guaranteed that the deferred copies are in-order
2549 * so just add to the tail.
2550 */
2551 list_add_tail(&bc->node, sgc_head);
2552
2553 return 0;
2554 }
2555
2556 /**
2557 * binder_add_fixup() - queue a fixup to be applied to sg copy
2558 * @pf_head: list_head of binder ptr fixup list
2559 * @offset: binder buffer offset in target process
2560 * @fixup: bytes to be copied for fixup
2561 * @skip_size: bytes to skip when copying (fixup will be applied later)
2562 *
2563 * Add the specified fixup to a list ordered by @offset. When copying
2564 * the scatter-gather buffers, the fixup will be copied instead of
2565 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2566 * will be applied later (in target process context), so we just skip
2567 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2568 * value in @fixup.
2569 *
2570 * This function is called *mostly* in @offset order, but there are
2571 * exceptions. Since out-of-order inserts are relatively uncommon,
2572 * we insert the new element by searching backward from the tail of
2573 * the list.
2574 *
2575 * Return: 0=success, else -errno
2576 */
2577 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2578 binder_uintptr_t fixup, size_t skip_size)
2579 {
2580 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2581 struct binder_ptr_fixup *tmppf;
2582
2583 if (!pf)
2584 return -ENOMEM;
2585
2586 pf->offset = offset;
2587 pf->fixup_data = fixup;
2588 pf->skip_size = skip_size;
2589 INIT_LIST_HEAD(&pf->node);
2590
2591 /* Fixups are *mostly* added in-order, but there are some
2592 * exceptions. Look backwards through list for insertion point.
2593 */
2594 list_for_each_entry_reverse(tmppf, pf_head, node) {
2595 if (tmppf->offset < pf->offset) {
2596 list_add(&pf->node, &tmppf->node);
2597 return 0;
2598 }
2599 }
2600 /*
2601 * if we get here, then the new offset is the lowest so
2602 * insert at the head
2603 */
2604 list_add(&pf->node, pf_head);
2605 return 0;
2606 }
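
/*
 * Ordering example (illustrative): with fixups already queued at offsets
 * 8 and 32, adding one at offset 16 scans backward from the tail, passes
 * 32, stops at 8 (8 < 16) and links in after it, yielding 8, 16, 32.
 */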
2607
2608 static int binder_translate_fd_array(struct list_head *pf_head,
2609 struct binder_fd_array_object *fda,
2610 const void __user *sender_ubuffer,
2611 struct binder_buffer_object *parent,
2612 struct binder_buffer_object *sender_uparent,
2613 struct binder_transaction *t,
2614 struct binder_thread *thread,
2615 struct binder_transaction *in_reply_to)
2616 {
2617 binder_size_t fdi, fd_buf_size;
2618 binder_size_t fda_offset;
2619 const void __user *sender_ufda_base;
2620 struct binder_proc *proc = thread->proc;
2621 int ret;
2622
2623 if (fda->num_fds == 0)
2624 return 0;
2625
2626 fd_buf_size = sizeof(u32) * fda->num_fds;
2627 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2628 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2629 proc->pid, thread->pid, (u64)fda->num_fds);
2630 return -EINVAL;
2631 }
2632 if (fd_buf_size > parent->length ||
2633 fda->parent_offset > parent->length - fd_buf_size) {
2634 /* No space for all file descriptors here. */
2635 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2636 proc->pid, thread->pid, (u64)fda->num_fds);
2637 return -EINVAL;
2638 }
2639 /*
2640 * the source data for binder_buffer_object is visible
2641 * to user-space and the @buffer element is the user
2642 * pointer to the buffer_object containing the fd_array.
2643 * Convert the address to an offset relative to
2644 * the base of the transaction buffer.
2645 */
2646 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2647 fda->parent_offset;
2648 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2649 fda->parent_offset;
2650
2651 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2652 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2653 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2654 proc->pid, thread->pid);
2655 return -EINVAL;
2656 }
2657 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2658 if (ret)
2659 return ret;
2660
2661 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2662 u32 fd;
2663 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2664 binder_size_t sender_uoffset = fdi * sizeof(fd);
2665
2666 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2667 if (!ret)
2668 ret = binder_translate_fd(fd, offset, t, thread,
2669 in_reply_to);
2670 if (ret)
2671 return ret > 0 ? -EINVAL : ret;
2672 }
2673 return 0;
2674 }
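
/*
 * Offset math example (illustrative): if the parent sg buffer sits at
 * user address U inside a target buffer whose base user address is B
 * (t->buffer->user_data), an fd array at parent_offset P starts at
 *
 *	fda_offset = (U - B) + P
 *
 * within the buffer, and fd number i lives at fda_offset + i * sizeof(u32).
 */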
2675
2676 static int binder_fixup_parent(struct list_head *pf_head,
2677 struct binder_transaction *t,
2678 struct binder_thread *thread,
2679 struct binder_buffer_object *bp,
2680 binder_size_t off_start_offset,
2681 binder_size_t num_valid,
2682 binder_size_t last_fixup_obj_off,
2683 binder_size_t last_fixup_min_off)
2684 {
2685 struct binder_buffer_object *parent;
2686 struct binder_buffer *b = t->buffer;
2687 struct binder_proc *proc = thread->proc;
2688 struct binder_proc *target_proc = t->to_proc;
2689 struct binder_object object;
2690 binder_size_t buffer_offset;
2691 binder_size_t parent_offset;
2692
2693 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2694 return 0;
2695
2696 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2697 off_start_offset, &parent_offset,
2698 num_valid);
2699 if (!parent) {
2700 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2701 proc->pid, thread->pid);
2702 return -EINVAL;
2703 }
2704
2705 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2706 parent_offset, bp->parent_offset,
2707 last_fixup_obj_off,
2708 last_fixup_min_off)) {
2709 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2710 proc->pid, thread->pid);
2711 return -EINVAL;
2712 }
2713
2714 if (parent->length < sizeof(binder_uintptr_t) ||
2715 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2716 /* No space for a pointer here! */
2717 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2718 proc->pid, thread->pid);
2719 return -EINVAL;
2720 }
2721 buffer_offset = bp->parent_offset +
2722 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2723 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
2724 }
2725
2726 /**
2727 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
2728 * @t1: the pending async txn in the frozen process
2729 * @t2: the new async txn to supersede the outdated pending one
2730 *
2731 * Return: true if t2 can supersede t1
2732 * false if t2 cannot supersede t1
2733 */
2734 static bool binder_can_update_transaction(struct binder_transaction *t1,
2735 struct binder_transaction *t2)
2736 {
2737 if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
2738 (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
2739 return false;
2740 if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
2741 t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
2742 t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
2743 t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
2744 return true;
2745 return false;
2746 }
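
/*
 * Example (illustrative): two one-way transactions both carrying
 * TF_ONE_WAY | TF_UPDATE_TXN, with identical code and flags, sent by the
 * same process to the same node (matching target ptr and cookie), are
 * treated as updates of one another; the newer may supersede the older
 * one still queued for a frozen process.
 */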
2747
2748 /**
2749 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
2750 * @t: new async transaction
2751 * @target_list: list to find outdated transaction
2752 *
2753 * Return: the outdated transaction if found
2754 * NULL if no outdated transaction can be found
2755 *
2756 * Requires the proc->inner_lock to be held.
2757 */
2758 static struct binder_transaction *
2759 binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
2760 struct list_head *target_list)
2761 {
2762 struct binder_work *w;
2763
2764 list_for_each_entry(w, target_list, entry) {
2765 struct binder_transaction *t_queued;
2766
2767 if (w->type != BINDER_WORK_TRANSACTION)
2768 continue;
2769 t_queued = container_of(w, struct binder_transaction, work);
2770 if (binder_can_update_transaction(t_queued, t))
2771 return t_queued;
2772 }
2773 return NULL;
2774 }
2775
2776 /**
2777 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2778 * @t: transaction to send
2779 * @proc: process to send the transaction to
2780 * @thread: thread in @proc to send the transaction to (may be NULL)
2781 *
2782 * This function queues a transaction to the specified process. It will try
2783 * to find a thread in the target process to handle the transaction and
2784 * wake it up. If no thread is found, the work is queued to the proc
2785 * waitqueue.
2786 *
2787 * If the @thread parameter is not NULL, the transaction is always queued
2788 * to the waitlist of that specific thread.
2789 *
2790 * Return: 0 if the transaction was successfully queued
2791 * BR_DEAD_REPLY if the target process or thread is dead
2792 * BR_FROZEN_REPLY if the target process or thread is frozen and
2793 * the sync transaction was rejected
2794 * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
2795 * and the async transaction was successfully queued
2796 */
2797 static int binder_proc_transaction(struct binder_transaction *t,
2798 struct binder_proc *proc,
2799 struct binder_thread *thread)
2800 {
2801 struct binder_node *node = t->buffer->target_node;
2802 bool oneway = !!(t->flags & TF_ONE_WAY);
2803 bool pending_async = false;
2804 struct binder_transaction *t_outdated = NULL;
2805 bool frozen = false;
2806
2807 BUG_ON(!node);
2808 binder_node_lock(node);
2809 if (oneway) {
2810 BUG_ON(thread);
2811 if (node->has_async_transaction)
2812 pending_async = true;
2813 else
2814 node->has_async_transaction = true;
2815 }
2816
2817 binder_inner_proc_lock(proc);
2818 if (proc->is_frozen) {
2819 frozen = true;
2820 proc->sync_recv |= !oneway;
2821 proc->async_recv |= oneway;
2822 }
2823
2824 if ((frozen && !oneway) || proc->is_dead ||
2825 (thread && thread->is_dead)) {
2826 binder_inner_proc_unlock(proc);
2827 binder_node_unlock(node);
2828 return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2829 }
2830
2831 if (!thread && !pending_async)
2832 thread = binder_select_thread_ilocked(proc);
2833
2834 if (thread) {
2835 binder_enqueue_thread_work_ilocked(thread, &t->work);
2836 } else if (!pending_async) {
2837 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2838 } else {
2839 if ((t->flags & TF_UPDATE_TXN) && frozen) {
2840 t_outdated = binder_find_outdated_transaction_ilocked(t,
2841 &node->async_todo);
2842 if (t_outdated) {
2843 binder_debug(BINDER_DEBUG_TRANSACTION,
2844 "txn %d supersedes %d\n",
2845 t->debug_id, t_outdated->debug_id);
2846 list_del_init(&t_outdated->work.entry);
2847 proc->outstanding_txns--;
2848 }
2849 }
2850 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2851 }
2852
2853 if (!pending_async)
2854 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2855
2856 proc->outstanding_txns++;
2857 binder_inner_proc_unlock(proc);
2858 binder_node_unlock(node);
2859
2860 /*
2861 * To reduce potential contention, free the outdated transaction and
2862 * buffer after releasing the locks.
2863 */
2864 if (t_outdated) {
2865 struct binder_buffer *buffer = t_outdated->buffer;
2866
2867 t_outdated->buffer = NULL;
2868 buffer->transaction = NULL;
2869 trace_binder_transaction_update_buffer_release(buffer);
2870 binder_release_entire_buffer(proc, NULL, buffer, false);
2871 binder_alloc_free_buf(&proc->alloc, buffer);
2872 kfree(t_outdated);
2873 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2874 }
2875
2876 if (oneway && frozen)
2877 return BR_TRANSACTION_PENDING_FROZEN;
2878
2879 return 0;
2880 }
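
/*
 * Caller-side sketch (illustrative): how the return value documented
 * above is typically consumed:
 *
 *	switch (binder_proc_transaction(t, target_proc, NULL)) {
 *	case 0:				delivered (or queued) normally
 *	case BR_TRANSACTION_PENDING_FROZEN: async txn queued, target frozen
 *	case BR_FROZEN_REPLY:		sync txn rejected, target frozen
 *	case BR_DEAD_REPLY:		target process/thread is dead
 *	}
 */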
2881
2882 /**
2883 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2884 * @node: struct binder_node for which to get refs
2885 * @procp: returns @node->proc if valid
2886 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2887 *
2888 * User-space normally keeps the node alive when creating a transaction
2889 * since it has a reference to the target. The local strong ref keeps it
2890 * alive if the sending process dies before the target process processes
2891 * the transaction. If the source process is malicious or has a reference
2892 * counting bug, relying on the local strong ref can fail.
2893 *
2894 * Since user-space can cause the local strong ref to go away, we also take
2895 * a tmpref on the node to ensure it survives while we are constructing
2896 * the transaction. We also need a tmpref on the proc while we are
2897 * constructing the transaction, so we take that here as well.
2898 *
2899 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2900 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2901 * target proc has died, @error is set to BR_DEAD_REPLY.
2902 */
2903 static struct binder_node *binder_get_node_refs_for_txn(
2904 struct binder_node *node,
2905 struct binder_proc **procp,
2906 uint32_t *error)
2907 {
2908 struct binder_node *target_node = NULL;
2909
2910 binder_node_inner_lock(node);
2911 if (node->proc) {
2912 target_node = node;
2913 binder_inc_node_nilocked(node, 1, 0, NULL);
2914 binder_inc_node_tmpref_ilocked(node);
2915 node->proc->tmp_ref++;
2916 *procp = node->proc;
2917 } else
2918 *error = BR_DEAD_REPLY;
2919 binder_node_inner_unlock(node);
2920
2921 return target_node;
2922 }
2923
2924 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2925 uint32_t command, int32_t param)
2926 {
2927 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2928
2929 if (!from) {
2930 /* annotation for sparse */
2931 __release(&from->proc->inner_lock);
2932 return;
2933 }
2934
2935 /* don't override existing errors */
2936 if (from->ee.command == BR_OK)
2937 binder_set_extended_error(&from->ee, id, command, param);
2938 binder_inner_proc_unlock(from->proc);
2939 binder_thread_dec_tmpref(from);
2940 }
2941
2942 static void binder_transaction(struct binder_proc *proc,
2943 struct binder_thread *thread,
2944 struct binder_transaction_data *tr, int reply,
2945 binder_size_t extra_buffers_size)
2946 {
2947 int ret;
2948 struct binder_transaction *t;
2949 struct binder_work *w;
2950 struct binder_work *tcomplete;
2951 binder_size_t buffer_offset = 0;
2952 binder_size_t off_start_offset, off_end_offset;
2953 binder_size_t off_min;
2954 binder_size_t sg_buf_offset, sg_buf_end_offset;
2955 binder_size_t user_offset = 0;
2956 struct binder_proc *target_proc = NULL;
2957 struct binder_thread *target_thread = NULL;
2958 struct binder_node *target_node = NULL;
2959 struct binder_transaction *in_reply_to = NULL;
2960 struct binder_transaction_log_entry *e;
2961 uint32_t return_error = 0;
2962 uint32_t return_error_param = 0;
2963 uint32_t return_error_line = 0;
2964 binder_size_t last_fixup_obj_off = 0;
2965 binder_size_t last_fixup_min_off = 0;
2966 struct binder_context *context = proc->context;
2967 int t_debug_id = atomic_inc_return(&binder_last_id);
2968 ktime_t t_start_time = ktime_get();
2969 char *secctx = NULL;
2970 u32 secctx_sz = 0;
2971 struct list_head sgc_head;
2972 struct list_head pf_head;
2973 const void __user *user_buffer = (const void __user *)
2974 (uintptr_t)tr->data.ptr.buffer;
2975 INIT_LIST_HEAD(&sgc_head);
2976 INIT_LIST_HEAD(&pf_head);
2977
2978 e = binder_transaction_log_add(&binder_transaction_log);
2979 e->debug_id = t_debug_id;
2980 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2981 e->from_proc = proc->pid;
2982 e->from_thread = thread->pid;
2983 e->target_handle = tr->target.handle;
2984 e->data_size = tr->data_size;
2985 e->offsets_size = tr->offsets_size;
2986 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2987
2988 binder_inner_proc_lock(proc);
2989 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2990 binder_inner_proc_unlock(proc);
2991
2992 if (reply) {
2993 binder_inner_proc_lock(proc);
2994 in_reply_to = thread->transaction_stack;
2995 if (in_reply_to == NULL) {
2996 binder_inner_proc_unlock(proc);
2997 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2998 proc->pid, thread->pid);
2999 return_error = BR_FAILED_REPLY;
3000 return_error_param = -EPROTO;
3001 return_error_line = __LINE__;
3002 goto err_empty_call_stack;
3003 }
3004 if (in_reply_to->to_thread != thread) {
3005 spin_lock(&in_reply_to->lock);
3006 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3007 proc->pid, thread->pid, in_reply_to->debug_id,
3008 in_reply_to->to_proc ?
3009 in_reply_to->to_proc->pid : 0,
3010 in_reply_to->to_thread ?
3011 in_reply_to->to_thread->pid : 0);
3012 spin_unlock(&in_reply_to->lock);
3013 binder_inner_proc_unlock(proc);
3014 return_error = BR_FAILED_REPLY;
3015 return_error_param = -EPROTO;
3016 return_error_line = __LINE__;
3017 in_reply_to = NULL;
3018 goto err_bad_call_stack;
3019 }
3020 thread->transaction_stack = in_reply_to->to_parent;
3021 binder_inner_proc_unlock(proc);
3022 binder_set_nice(in_reply_to->saved_priority);
3023 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3024 if (target_thread == NULL) {
3025 /* annotation for sparse */
3026 __release(&target_thread->proc->inner_lock);
3027 binder_txn_error("%d:%d reply target not found\n",
3028 thread->pid, proc->pid);
3029 return_error = BR_DEAD_REPLY;
3030 return_error_line = __LINE__;
3031 goto err_dead_binder;
3032 }
3033 if (target_thread->transaction_stack != in_reply_to) {
3034 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3035 proc->pid, thread->pid,
3036 target_thread->transaction_stack ?
3037 target_thread->transaction_stack->debug_id : 0,
3038 in_reply_to->debug_id);
3039 binder_inner_proc_unlock(target_thread->proc);
3040 return_error = BR_FAILED_REPLY;
3041 return_error_param = -EPROTO;
3042 return_error_line = __LINE__;
3043 in_reply_to = NULL;
3044 target_thread = NULL;
3045 goto err_dead_binder;
3046 }
3047 target_proc = target_thread->proc;
3048 target_proc->tmp_ref++;
3049 binder_inner_proc_unlock(target_thread->proc);
3050 } else {
3051 if (tr->target.handle) {
3052 struct binder_ref *ref;
3053
3054 /*
3055 * There must already be a strong ref
3056 * on this node. If so, do a strong
3057 * increment on the node to ensure it
3058 * stays alive until the transaction is
3059 * done.
3060 */
3061 binder_proc_lock(proc);
3062 ref = binder_get_ref_olocked(proc, tr->target.handle,
3063 true);
3064 if (ref) {
3065 target_node = binder_get_node_refs_for_txn(
3066 ref->node, &target_proc,
3067 &return_error);
3068 } else {
3069 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
3070 proc->pid, thread->pid, tr->target.handle);
3071 return_error = BR_FAILED_REPLY;
3072 }
3073 binder_proc_unlock(proc);
3074 } else {
3075 mutex_lock(&context->context_mgr_node_lock);
3076 target_node = context->binder_context_mgr_node;
3077 if (target_node)
3078 target_node = binder_get_node_refs_for_txn(
3079 target_node, &target_proc,
3080 &return_error);
3081 else
3082 return_error = BR_DEAD_REPLY;
3083 mutex_unlock(&context->context_mgr_node_lock);
3084 if (target_node && target_proc->pid == proc->pid) {
3085 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3086 proc->pid, thread->pid);
3087 return_error = BR_FAILED_REPLY;
3088 return_error_param = -EINVAL;
3089 return_error_line = __LINE__;
3090 goto err_invalid_target_handle;
3091 }
3092 }
3093 if (!target_node) {
3094 binder_txn_error("%d:%d cannot find target node\n",
3095 thread->pid, proc->pid);
3096 /*
3097 * return_error is set above
3098 */
3099 return_error_param = -EINVAL;
3100 return_error_line = __LINE__;
3101 goto err_dead_binder;
3102 }
3103 e->to_node = target_node->debug_id;
3104 if (WARN_ON(proc == target_proc)) {
3105 binder_txn_error("%d:%d self transactions not allowed\n",
3106 thread->pid, proc->pid);
3107 return_error = BR_FAILED_REPLY;
3108 return_error_param = -EINVAL;
3109 return_error_line = __LINE__;
3110 goto err_invalid_target_handle;
3111 }
3112 if (security_binder_transaction(proc->cred,
3113 target_proc->cred) < 0) {
3114 binder_txn_error("%d:%d transaction credentials failed\n",
3115 thread->pid, proc->pid);
3116 return_error = BR_FAILED_REPLY;
3117 return_error_param = -EPERM;
3118 return_error_line = __LINE__;
3119 goto err_invalid_target_handle;
3120 }
3121 binder_inner_proc_lock(proc);
3122
3123 w = list_first_entry_or_null(&thread->todo,
3124 struct binder_work, entry);
3125 if (!(tr->flags & TF_ONE_WAY) && w &&
3126 w->type == BINDER_WORK_TRANSACTION) {
3127 /*
3128 * Do not allow new outgoing transaction from a
3129 * thread that has a transaction at the head of
3130 * its todo list. Only need to check the head
3131 * because binder_select_thread_ilocked picks a
3132 * thread from proc->waiting_threads to enqueue
3133 * the transaction, and nothing is queued to the
3134 * todo list while the thread is on waiting_threads.
3135 */
3136 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3137 proc->pid, thread->pid);
3138 binder_inner_proc_unlock(proc);
3139 return_error = BR_FAILED_REPLY;
3140 return_error_param = -EPROTO;
3141 return_error_line = __LINE__;
3142 goto err_bad_todo_list;
3143 }
3144
3145 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3146 struct binder_transaction *tmp;
3147
3148 tmp = thread->transaction_stack;
3149 if (tmp->to_thread != thread) {
3150 spin_lock(&tmp->lock);
3151 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3152 proc->pid, thread->pid, tmp->debug_id,
3153 tmp->to_proc ? tmp->to_proc->pid : 0,
3154 tmp->to_thread ?
3155 tmp->to_thread->pid : 0);
3156 spin_unlock(&tmp->lock);
3157 binder_inner_proc_unlock(proc);
3158 return_error = BR_FAILED_REPLY;
3159 return_error_param = -EPROTO;
3160 return_error_line = __LINE__;
3161 goto err_bad_call_stack;
3162 }
3163 while (tmp) {
3164 struct binder_thread *from;
3165
3166 spin_lock(&tmp->lock);
3167 from = tmp->from;
3168 if (from && from->proc == target_proc) {
3169 atomic_inc(&from->tmp_ref);
3170 target_thread = from;
3171 spin_unlock(&tmp->lock);
3172 break;
3173 }
3174 spin_unlock(&tmp->lock);
3175 tmp = tmp->from_parent;
3176 }
3177 }
3178 binder_inner_proc_unlock(proc);
3179 }
3180 if (target_thread)
3181 e->to_thread = target_thread->pid;
3182 e->to_proc = target_proc->pid;
3183
3184 /* TODO: reuse incoming transaction for reply */
3185 t = kzalloc(sizeof(*t), GFP_KERNEL);
3186 if (t == NULL) {
3187 binder_txn_error("%d:%d cannot allocate transaction\n",
3188 thread->pid, proc->pid);
3189 return_error = BR_FAILED_REPLY;
3190 return_error_param = -ENOMEM;
3191 return_error_line = __LINE__;
3192 goto err_alloc_t_failed;
3193 }
3194 INIT_LIST_HEAD(&t->fd_fixups);
3195 binder_stats_created(BINDER_STAT_TRANSACTION);
3196 spin_lock_init(&t->lock);
3197
3198 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3199 if (tcomplete == NULL) {
3200 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3201 thread->pid, proc->pid);
3202 return_error = BR_FAILED_REPLY;
3203 return_error_param = -ENOMEM;
3204 return_error_line = __LINE__;
3205 goto err_alloc_tcomplete_failed;
3206 }
3207 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3208
3209 t->debug_id = t_debug_id;
3210 t->start_time = t_start_time;
3211
3212 if (reply)
3213 binder_debug(BINDER_DEBUG_TRANSACTION,
3214 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3215 proc->pid, thread->pid, t->debug_id,
3216 target_proc->pid, target_thread->pid,
3217 (u64)tr->data.ptr.buffer,
3218 (u64)tr->data.ptr.offsets,
3219 (u64)tr->data_size, (u64)tr->offsets_size,
3220 (u64)extra_buffers_size);
3221 else
3222 binder_debug(BINDER_DEBUG_TRANSACTION,
3223 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3224 proc->pid, thread->pid, t->debug_id,
3225 target_proc->pid, target_node->debug_id,
3226 (u64)tr->data.ptr.buffer,
3227 (u64)tr->data.ptr.offsets,
3228 (u64)tr->data_size, (u64)tr->offsets_size,
3229 (u64)extra_buffers_size);
3230
3231 if (!reply && !(tr->flags & TF_ONE_WAY)) {
3232 t->from = thread;
3233 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3234 t->from_pid = -1;
3235 t->from_tid = -1;
3236 #endif
3237 } else {
3238 t->from = NULL;
3239 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3240 t->from_pid = thread->proc->pid;
3241 t->from_tid = thread->pid;
3242 #endif
3243 }
3244
3245 t->sender_euid = task_euid(proc->tsk);
3246 #ifdef CONFIG_ACCESS_TOKENID
3247 t->sender_tokenid = current->token;
3248 t->first_tokenid = current->ftoken;
3249 #endif /* CONFIG_ACCESS_TOKENID */
3250 t->to_proc = target_proc;
3251 t->to_thread = target_thread;
3252 t->code = tr->code;
3253 t->flags = tr->flags;
3254 t->priority = task_nice(current);
3255
3256 if (target_node && target_node->txn_security_ctx) {
3257 u32 secid;
3258 size_t added_size;
3259
3260 security_cred_getsecid(proc->cred, &secid);
3261 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3262 if (ret) {
3263 binder_txn_error("%d:%d failed to get security context\n",
3264 thread->pid, proc->pid);
3265 return_error = BR_FAILED_REPLY;
3266 return_error_param = ret;
3267 return_error_line = __LINE__;
3268 goto err_get_secctx_failed;
3269 }
3270 added_size = ALIGN(secctx_sz, sizeof(u64));
3271 extra_buffers_size += added_size;
3272 if (extra_buffers_size < added_size) {
3273 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3274 thread->pid, proc->pid);
3275 return_error = BR_FAILED_REPLY;
3276 return_error_param = -EINVAL;
3277 return_error_line = __LINE__;
3278 goto err_bad_extra_size;
3279 }
3280 }
3281
3282 trace_binder_transaction(reply, t, target_node);
3283
3284 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3285 tr->offsets_size, extra_buffers_size,
3286 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3287 if (IS_ERR(t->buffer)) {
3288 char *s;
3289
3290 ret = PTR_ERR(t->buffer);
3291 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3292 : (ret == -ENOSPC) ? ": no space left"
3293 : (ret == -ENOMEM) ? ": memory allocation failed"
3294 : "";
3295 binder_txn_error("cannot allocate buffer%s", s);
3296
3297 return_error_param = PTR_ERR(t->buffer);
3298 return_error = return_error_param == -ESRCH ?
3299 BR_DEAD_REPLY : BR_FAILED_REPLY;
3300 return_error_line = __LINE__;
3301 t->buffer = NULL;
3302 goto err_binder_alloc_buf_failed;
3303 }
3304 if (secctx) {
3305 int err;
3306 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3307 ALIGN(tr->offsets_size, sizeof(void *)) +
3308 ALIGN(extra_buffers_size, sizeof(void *)) -
3309 ALIGN(secctx_sz, sizeof(u64));
3310
3311 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3312 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3313 t->buffer, buf_offset,
3314 secctx, secctx_sz);
3315 if (err) {
3316 t->security_ctx = 0;
3317 WARN_ON(1);
3318 }
3319 security_release_secctx(secctx, secctx_sz);
3320 secctx = NULL;
3321 }
3322 t->buffer->debug_id = t->debug_id;
3323 t->buffer->transaction = t;
3324 t->buffer->target_node = target_node;
3325 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3326 trace_binder_transaction_alloc_buf(t->buffer);
3327
3328 if (binder_alloc_copy_user_to_buffer(
3329 &target_proc->alloc,
3330 t->buffer,
3331 ALIGN(tr->data_size, sizeof(void *)),
3332 (const void __user *)
3333 (uintptr_t)tr->data.ptr.offsets,
3334 tr->offsets_size)) {
3335 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3336 proc->pid, thread->pid);
3337 return_error = BR_FAILED_REPLY;
3338 return_error_param = -EFAULT;
3339 return_error_line = __LINE__;
3340 goto err_copy_data_failed;
3341 }
3342 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3343 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3344 proc->pid, thread->pid, (u64)tr->offsets_size);
3345 return_error = BR_FAILED_REPLY;
3346 return_error_param = -EINVAL;
3347 return_error_line = __LINE__;
3348 goto err_bad_offset;
3349 }
3350 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3351 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3352 proc->pid, thread->pid,
3353 (u64)extra_buffers_size);
3354 return_error = BR_FAILED_REPLY;
3355 return_error_param = -EINVAL;
3356 return_error_line = __LINE__;
3357 goto err_bad_offset;
3358 }
3359 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3360 buffer_offset = off_start_offset;
3361 off_end_offset = off_start_offset + tr->offsets_size;
3362 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3363 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3364 ALIGN(secctx_sz, sizeof(u64));
3365 off_min = 0;
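/*
 * Walk the offsets array. Each entry is an offset into the data
 * buffer where a binder object header lives; every object is
 * validated and translated into the target's namespace as it is
 * copied (binders become handles, fds are re-issued, scatter-gather
 * pointers are fixed up).
 */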
3366 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3367 buffer_offset += sizeof(binder_size_t)) {
3368 struct binder_object_header *hdr;
3369 size_t object_size;
3370 struct binder_object object;
3371 binder_size_t object_offset;
3372 binder_size_t copy_size;
3373
3374 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3375 &object_offset,
3376 t->buffer,
3377 buffer_offset,
3378 sizeof(object_offset))) {
3379 binder_txn_error("%d:%d copy offset from buffer failed\n",
3380 thread->pid, proc->pid);
3381 return_error = BR_FAILED_REPLY;
3382 return_error_param = -EINVAL;
3383 return_error_line = __LINE__;
3384 goto err_bad_offset;
3385 }
3386
3387 /*
3388 * Copy the source user buffer up to the next object
3389 * that will be processed.
3390 */
3391 copy_size = object_offset - user_offset;
3392 if (copy_size && (user_offset > object_offset ||
3393 binder_alloc_copy_user_to_buffer(
3394 &target_proc->alloc,
3395 t->buffer, user_offset,
3396 user_buffer + user_offset,
3397 copy_size))) {
3398 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3399 proc->pid, thread->pid);
3400 return_error = BR_FAILED_REPLY;
3401 return_error_param = -EFAULT;
3402 return_error_line = __LINE__;
3403 goto err_copy_data_failed;
3404 }
3405 object_size = binder_get_object(target_proc, user_buffer,
3406 t->buffer, object_offset, &object);
3407 if (object_size == 0 || object_offset < off_min) {
3408 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3409 proc->pid, thread->pid,
3410 (u64)object_offset,
3411 (u64)off_min,
3412 (u64)t->buffer->data_size);
3413 return_error = BR_FAILED_REPLY;
3414 return_error_param = -EINVAL;
3415 return_error_line = __LINE__;
3416 goto err_bad_offset;
3417 }
3418 /*
3419 * Set offset to the next buffer fragment to be
3420 * copied
3421 */
3422 user_offset = object_offset + object_size;
3423
3424 hdr = &object.hdr;
3425 off_min = object_offset + object_size;
3426 switch (hdr->type) {
3427 case BINDER_TYPE_BINDER:
3428 case BINDER_TYPE_WEAK_BINDER: {
3429 struct flat_binder_object *fp;
3430
3431 fp = to_flat_binder_object(hdr);
3432 ret = binder_translate_binder(fp, t, thread);
3433
3434 if (ret < 0 ||
3435 binder_alloc_copy_to_buffer(&target_proc->alloc,
3436 t->buffer,
3437 object_offset,
3438 fp, sizeof(*fp))) {
3439 binder_txn_error("%d:%d translate binder failed\n",
3440 thread->pid, proc->pid);
3441 return_error = BR_FAILED_REPLY;
3442 return_error_param = ret;
3443 return_error_line = __LINE__;
3444 goto err_translate_failed;
3445 }
3446 } break;
3447 case BINDER_TYPE_HANDLE:
3448 case BINDER_TYPE_WEAK_HANDLE: {
3449 struct flat_binder_object *fp;
3450
3451 fp = to_flat_binder_object(hdr);
3452 ret = binder_translate_handle(fp, t, thread);
3453 if (ret < 0 ||
3454 binder_alloc_copy_to_buffer(&target_proc->alloc,
3455 t->buffer,
3456 object_offset,
3457 fp, sizeof(*fp))) {
3458 binder_txn_error("%d:%d translate handle failed\n",
3459 thread->pid, proc->pid);
3460 return_error = BR_FAILED_REPLY;
3461 return_error_param = ret;
3462 return_error_line = __LINE__;
3463 goto err_translate_failed;
3464 }
3465 } break;
3466
3467 case BINDER_TYPE_FD: {
3468 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3469 binder_size_t fd_offset = object_offset +
3470 (uintptr_t)&fp->fd - (uintptr_t)fp;
3471 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3472 thread, in_reply_to);
3473
3474 fp->pad_binder = 0;
3475 if (ret < 0 ||
3476 binder_alloc_copy_to_buffer(&target_proc->alloc,
3477 t->buffer,
3478 object_offset,
3479 fp, sizeof(*fp))) {
3480 binder_txn_error("%d:%d translate fd failed\n",
3481 thread->pid, proc->pid);
3482 return_error = BR_FAILED_REPLY;
3483 return_error_param = ret;
3484 return_error_line = __LINE__;
3485 goto err_translate_failed;
3486 }
3487 } break;
3488 case BINDER_TYPE_FDA: {
3489 struct binder_object ptr_object;
3490 binder_size_t parent_offset;
3491 struct binder_object user_object;
3492 size_t user_parent_size;
3493 struct binder_fd_array_object *fda =
3494 to_binder_fd_array_object(hdr);
3495 size_t num_valid = (buffer_offset - off_start_offset) /
3496 sizeof(binder_size_t);
3497 struct binder_buffer_object *parent =
3498 binder_validate_ptr(target_proc, t->buffer,
3499 &ptr_object, fda->parent,
3500 off_start_offset,
3501 &parent_offset,
3502 num_valid);
3503 if (!parent) {
3504 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3505 proc->pid, thread->pid);
3506 return_error = BR_FAILED_REPLY;
3507 return_error_param = -EINVAL;
3508 return_error_line = __LINE__;
3509 goto err_bad_parent;
3510 }
3511 if (!binder_validate_fixup(target_proc, t->buffer,
3512 off_start_offset,
3513 parent_offset,
3514 fda->parent_offset,
3515 last_fixup_obj_off,
3516 last_fixup_min_off)) {
3517 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3518 proc->pid, thread->pid);
3519 return_error = BR_FAILED_REPLY;
3520 return_error_param = -EINVAL;
3521 return_error_line = __LINE__;
3522 goto err_bad_parent;
3523 }
3524 /*
3525 * We need to read the user version of the parent
3526 * object to get the original user offset
3527 */
3528 user_parent_size =
3529 binder_get_object(proc, user_buffer, t->buffer,
3530 parent_offset, &user_object);
3531 if (user_parent_size != sizeof(user_object.bbo)) {
3532 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3533 proc->pid, thread->pid,
3534 user_parent_size,
3535 sizeof(user_object.bbo));
3536 return_error = BR_FAILED_REPLY;
3537 return_error_param = -EINVAL;
3538 return_error_line = __LINE__;
3539 goto err_bad_parent;
3540 }
3541 ret = binder_translate_fd_array(&pf_head, fda,
3542 user_buffer, parent,
3543 &user_object.bbo, t,
3544 thread, in_reply_to);
3545 if (!ret)
3546 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3547 t->buffer,
3548 object_offset,
3549 fda, sizeof(*fda));
3550 if (ret) {
3551 binder_txn_error("%d:%d translate fd array failed\n",
3552 thread->pid, proc->pid);
3553 return_error = BR_FAILED_REPLY;
3554 return_error_param = ret > 0 ? -EINVAL : ret;
3555 return_error_line = __LINE__;
3556 goto err_translate_failed;
3557 }
3558 last_fixup_obj_off = parent_offset;
3559 last_fixup_min_off =
3560 fda->parent_offset + sizeof(u32) * fda->num_fds;
3561 } break;
3562 case BINDER_TYPE_PTR: {
3563 struct binder_buffer_object *bp =
3564 to_binder_buffer_object(hdr);
3565 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3566 size_t num_valid;
3567
3568 if (bp->length > buf_left) {
3569 binder_user_error("%d:%d got transaction with too large buffer\n",
3570 proc->pid, thread->pid);
3571 return_error = BR_FAILED_REPLY;
3572 return_error_param = -EINVAL;
3573 return_error_line = __LINE__;
3574 goto err_bad_offset;
3575 }
3576 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3577 (const void __user *)(uintptr_t)bp->buffer,
3578 bp->length);
3579 if (ret) {
3580 binder_txn_error("%d:%d deferred copy failed\n",
3581 thread->pid, proc->pid);
3582 return_error = BR_FAILED_REPLY;
3583 return_error_param = ret;
3584 return_error_line = __LINE__;
3585 goto err_translate_failed;
3586 }
3587 /* Fixup buffer pointer to target proc address space */
3588 bp->buffer = (uintptr_t)
3589 t->buffer->user_data + sg_buf_offset;
3590 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3591
3592 num_valid = (buffer_offset - off_start_offset) /
3593 sizeof(binder_size_t);
3594 ret = binder_fixup_parent(&pf_head, t,
3595 thread, bp,
3596 off_start_offset,
3597 num_valid,
3598 last_fixup_obj_off,
3599 last_fixup_min_off);
3600 if (ret < 0 ||
3601 binder_alloc_copy_to_buffer(&target_proc->alloc,
3602 t->buffer,
3603 object_offset,
3604 bp, sizeof(*bp))) {
3605 binder_txn_error("%d:%d failed to fixup parent\n",
3606 thread->pid, proc->pid);
3607 return_error = BR_FAILED_REPLY;
3608 return_error_param = ret;
3609 return_error_line = __LINE__;
3610 goto err_translate_failed;
3611 }
3612 last_fixup_obj_off = object_offset;
3613 last_fixup_min_off = 0;
3614 } break;
3615 default:
3616 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3617 proc->pid, thread->pid, hdr->type);
3618 return_error = BR_FAILED_REPLY;
3619 return_error_param = -EINVAL;
3620 return_error_line = __LINE__;
3621 goto err_bad_object_type;
3622 }
3623 }
3624 /* Done processing objects, copy the rest of the buffer */
3625 if (binder_alloc_copy_user_to_buffer(
3626 &target_proc->alloc,
3627 t->buffer, user_offset,
3628 user_buffer + user_offset,
3629 tr->data_size - user_offset)) {
3630 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3631 proc->pid, thread->pid);
3632 return_error = BR_FAILED_REPLY;
3633 return_error_param = -EFAULT;
3634 return_error_line = __LINE__;
3635 goto err_copy_data_failed;
3636 }
3637
3638 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3639 &sgc_head, &pf_head);
3640 if (ret) {
3641 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3642 proc->pid, thread->pid);
3643 return_error = BR_FAILED_REPLY;
3644 return_error_param = ret;
3645 return_error_line = __LINE__;
3646 goto err_copy_data_failed;
3647 }
3648 if (t->buffer->oneway_spam_suspect)
3649 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3650 else
3651 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3652 t->work.type = BINDER_WORK_TRANSACTION;
3653
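/*
 * Deliver the transaction. Three cases:
 *  - reply: pop the matching in_reply_to from the target thread's
 *    stack and wake that thread directly;
 *  - synchronous call: push t on the caller's transaction stack and
 *    hand it to the target, deferring our TRANSACTION_COMPLETE;
 *  - oneway: queue on the target node/proc with no reply expected.
 */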
3654 if (reply) {
3655 binder_enqueue_thread_work(thread, tcomplete);
3656 binder_inner_proc_lock(target_proc);
3657 if (target_thread->is_dead) {
3658 return_error = BR_DEAD_REPLY;
3659 binder_inner_proc_unlock(target_proc);
3660 goto err_dead_proc_or_thread;
3661 }
3662 BUG_ON(t->buffer->async_transaction != 0);
3663 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3664 t->timestamp = in_reply_to->timestamp;
3665 #endif
3666 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3667 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3668 target_proc->outstanding_txns++;
3669 binder_inner_proc_unlock(target_proc);
3670 wake_up_interruptible_sync(&target_thread->wait);
3671 binder_free_transaction(in_reply_to);
3672 } else if (!(t->flags & TF_ONE_WAY)) {
3673 BUG_ON(t->buffer->async_transaction != 0);
3674 binder_inner_proc_lock(proc);
3675 /*
3676 * Defer the TRANSACTION_COMPLETE, so we don't return to
3677 * userspace immediately; this allows the target process to
3678 * immediately start processing this transaction, reducing
3679 * latency. We will then return the TRANSACTION_COMPLETE when
3680 * the target replies (or there is an error).
3681 */
3682 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3683 t->need_reply = 1;
3684 t->from_parent = thread->transaction_stack;
3685 thread->transaction_stack = t;
3686 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3687 t->timestamp = binder_clock();
3688 #endif
3689 binder_inner_proc_unlock(proc);
3690 return_error = binder_proc_transaction(t,
3691 target_proc, target_thread);
3692 if (return_error) {
3693 binder_inner_proc_lock(proc);
3694 binder_pop_transaction_ilocked(thread, t);
3695 binder_inner_proc_unlock(proc);
3696 goto err_dead_proc_or_thread;
3697 }
3698 } else {
3699 BUG_ON(target_node == NULL);
3700 BUG_ON(t->buffer->async_transaction != 1);
3701 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3702 t->timestamp = binder_clock();
3703 #endif
3704 return_error = binder_proc_transaction(t, target_proc, NULL);
3705 /*
3706 * Let the caller know when async transaction reaches a frozen
3707 * process and is put in a pending queue, waiting for the target
3708 * process to be unfrozen.
3709 */
3710 if (return_error == BR_TRANSACTION_PENDING_FROZEN)
3711 tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
3712 binder_enqueue_thread_work(thread, tcomplete);
3713 if (return_error &&
3714 return_error != BR_TRANSACTION_PENDING_FROZEN)
3715 goto err_dead_proc_or_thread;
3716 }
3717 if (target_thread)
3718 binder_thread_dec_tmpref(target_thread);
3719 binder_proc_dec_tmpref(target_proc);
3720 if (target_node)
3721 binder_dec_node_tmpref(target_node);
3722 /*
3723 * write barrier to synchronize with initialization
3724 * of log entry
3725 */
3726 smp_wmb();
3727 WRITE_ONCE(e->debug_id_done, t_debug_id);
3728 return;
3729
3730 err_dead_proc_or_thread:
3731 binder_txn_error("%d:%d dead process or thread\n",
3732 thread->pid, proc->pid);
3733 return_error_line = __LINE__;
3734 binder_dequeue_work(proc, tcomplete);
3735 err_translate_failed:
3736 err_bad_object_type:
3737 err_bad_offset:
3738 err_bad_parent:
3739 err_copy_data_failed:
3740 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3741 binder_free_txn_fixups(t);
3742 trace_binder_transaction_failed_buffer_release(t->buffer);
3743 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3744 buffer_offset, true);
3745 if (target_node)
3746 binder_dec_node_tmpref(target_node);
3747 target_node = NULL;
3748 t->buffer->transaction = NULL;
3749 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3750 err_binder_alloc_buf_failed:
3751 err_bad_extra_size:
3752 if (secctx)
3753 security_release_secctx(secctx, secctx_sz);
3754 err_get_secctx_failed:
3755 kfree(tcomplete);
3756 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3757 err_alloc_tcomplete_failed:
3758 if (trace_binder_txn_latency_free_enabled())
3759 binder_txn_latency_free(t);
3760 kfree(t);
3761 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3762 err_alloc_t_failed:
3763 err_bad_todo_list:
3764 err_bad_call_stack:
3765 err_empty_call_stack:
3766 err_dead_binder:
3767 err_invalid_target_handle:
3768 if (target_node) {
3769 binder_dec_node(target_node, 1, 0);
3770 binder_dec_node_tmpref(target_node);
3771 }
3772
3773 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3774 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3775 proc->pid, thread->pid, reply ? "reply" :
3776 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3777 target_proc ? target_proc->pid : 0,
3778 target_thread ? target_thread->pid : 0,
3779 t_debug_id, return_error, return_error_param,
3780 (u64)tr->data_size, (u64)tr->offsets_size,
3781 return_error_line);
3782
3783 if (target_thread)
3784 binder_thread_dec_tmpref(target_thread);
3785 if (target_proc)
3786 binder_proc_dec_tmpref(target_proc);
3787
3788 {
3789 struct binder_transaction_log_entry *fe;
3790
3791 e->return_error = return_error;
3792 e->return_error_param = return_error_param;
3793 e->return_error_line = return_error_line;
3794 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3795 *fe = *e;
3796 /*
3797 * write barrier to synchronize with initialization
3798 * of log entry
3799 */
3800 smp_wmb();
3801 WRITE_ONCE(e->debug_id_done, t_debug_id);
3802 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3803 }
3804
3805 BUG_ON(thread->return_error.cmd != BR_OK);
3806 if (in_reply_to) {
3807 binder_set_txn_from_error(in_reply_to, t_debug_id,
3808 return_error, return_error_param);
3809 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3810 binder_enqueue_thread_work(thread, &thread->return_error.work);
3811 binder_send_failed_reply(in_reply_to, return_error);
3812 } else {
3813 binder_inner_proc_lock(proc);
3814 binder_set_extended_error(&thread->ee, t_debug_id,
3815 return_error, return_error_param);
3816 binder_inner_proc_unlock(proc);
3817 thread->return_error.cmd = return_error;
3818 binder_enqueue_thread_work(thread, &thread->return_error.work);
3819 }
3820 }
3821
3822 /**
3823 * binder_free_buf() - free the specified buffer
3824 * @proc: binder proc that owns buffer
3825 * @buffer: buffer to be freed
3826 * @is_failure: true if the buffer is being freed after a failed transaction
3827 *
3828 * If the buffer is for an async transaction, enqueue the next
3829 * async transaction from the node.
3830 *
3831 * Cleanup buffer and free it.
3832 */
3833 static void
3834 binder_free_buf(struct binder_proc *proc,
3835 struct binder_thread *thread,
3836 struct binder_buffer *buffer, bool is_failure)
3837 {
3838 binder_inner_proc_lock(proc);
3839 if (buffer->transaction) {
3840 buffer->transaction->buffer = NULL;
3841 buffer->transaction = NULL;
3842 }
3843 binder_inner_proc_unlock(proc);
3844 if (buffer->async_transaction && buffer->target_node) {
3845 struct binder_node *buf_node;
3846 struct binder_work *w;
3847
3848 buf_node = buffer->target_node;
3849 binder_node_inner_lock(buf_node);
3850 BUG_ON(!buf_node->has_async_transaction);
3851 BUG_ON(buf_node->proc != proc);
3852 w = binder_dequeue_work_head_ilocked(
3853 &buf_node->async_todo);
3854 if (!w) {
3855 buf_node->has_async_transaction = false;
3856 } else {
3857 binder_enqueue_work_ilocked(
3858 w, &proc->todo);
3859 binder_wakeup_proc_ilocked(proc);
3860 }
3861 binder_node_inner_unlock(buf_node);
3862 }
3863 trace_binder_transaction_buffer_release(buffer);
3864 binder_release_entire_buffer(proc, thread, buffer, is_failure);
3865 binder_alloc_free_buf(&proc->alloc, buffer);
3866 }
3867
3868 static int binder_thread_write(struct binder_proc *proc,
3869 struct binder_thread *thread,
3870 binder_uintptr_t binder_buffer, size_t size,
3871 binder_size_t *consumed)
3872 {
3873 uint32_t cmd;
3874 struct binder_context *context = proc->context;
3875 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3876 void __user *ptr = buffer + *consumed;
3877 void __user *end = buffer + size;
3878
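/*
 * The write buffer is a packed stream of BC_* commands: a u32 command
 * word followed immediately by that command's fixed-size arguments.
 * Keep consuming until the buffer is exhausted or an error reply is
 * pending for this thread.
 */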
3879 while (ptr < end && thread->return_error.cmd == BR_OK) {
3880 int ret;
3881
3882 if (get_user(cmd, (uint32_t __user *)ptr))
3883 return -EFAULT;
3884 ptr += sizeof(uint32_t);
3885 trace_binder_command(cmd);
3886 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3887 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3888 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3889 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3890 }
3891 switch (cmd) {
3892 case BC_INCREFS:
3893 case BC_ACQUIRE:
3894 case BC_RELEASE:
3895 case BC_DECREFS: {
3896 uint32_t target;
3897 const char *debug_string;
3898 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3899 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3900 struct binder_ref_data rdata;
3901
3902 if (get_user(target, (uint32_t __user *)ptr))
3903 return -EFAULT;
3904
3905 ptr += sizeof(uint32_t);
3906 ret = -1;
3907 if (increment && !target) {
3908 struct binder_node *ctx_mgr_node;
3909
3910 mutex_lock(&context->context_mgr_node_lock);
3911 ctx_mgr_node = context->binder_context_mgr_node;
3912 if (ctx_mgr_node) {
3913 if (ctx_mgr_node->proc == proc) {
3914 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3915 proc->pid, thread->pid);
3916 mutex_unlock(&context->context_mgr_node_lock);
3917 return -EINVAL;
3918 }
3919 ret = binder_inc_ref_for_node(
3920 proc, ctx_mgr_node,
3921 strong, NULL, &rdata);
3922 }
3923 mutex_unlock(&context->context_mgr_node_lock);
3924 }
3925 if (ret)
3926 ret = binder_update_ref_for_handle(
3927 proc, target, increment, strong,
3928 &rdata);
3929 if (!ret && rdata.desc != target) {
3930 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3931 proc->pid, thread->pid,
3932 target, rdata.desc);
3933 }
3934 switch (cmd) {
3935 case BC_INCREFS:
3936 debug_string = "IncRefs";
3937 break;
3938 case BC_ACQUIRE:
3939 debug_string = "Acquire";
3940 break;
3941 case BC_RELEASE:
3942 debug_string = "Release";
3943 break;
3944 case BC_DECREFS:
3945 default:
3946 debug_string = "DecRefs";
3947 break;
3948 }
3949 if (ret) {
3950 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3951 proc->pid, thread->pid, debug_string,
3952 strong, target, ret);
3953 break;
3954 }
3955 binder_debug(BINDER_DEBUG_USER_REFS,
3956 "%d:%d %s ref %d desc %d s %d w %d\n",
3957 proc->pid, thread->pid, debug_string,
3958 rdata.debug_id, rdata.desc, rdata.strong,
3959 rdata.weak);
3960 break;
3961 }
3962 case BC_INCREFS_DONE:
3963 case BC_ACQUIRE_DONE: {
3964 binder_uintptr_t node_ptr;
3965 binder_uintptr_t cookie;
3966 struct binder_node *node;
3967 bool free_node;
3968
3969 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3970 return -EFAULT;
3971 ptr += sizeof(binder_uintptr_t);
3972 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3973 return -EFAULT;
3974 ptr += sizeof(binder_uintptr_t);
3975 node = binder_get_node(proc, node_ptr);
3976 if (node == NULL) {
3977 binder_user_error("%d:%d %s u%016llx no match\n",
3978 proc->pid, thread->pid,
3979 cmd == BC_INCREFS_DONE ?
3980 "BC_INCREFS_DONE" :
3981 "BC_ACQUIRE_DONE",
3982 (u64)node_ptr);
3983 break;
3984 }
3985 if (cookie != node->cookie) {
3986 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3987 proc->pid, thread->pid,
3988 cmd == BC_INCREFS_DONE ?
3989 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3990 (u64)node_ptr, node->debug_id,
3991 (u64)cookie, (u64)node->cookie);
3992 binder_put_node(node);
3993 break;
3994 }
3995 binder_node_inner_lock(node);
3996 if (cmd == BC_ACQUIRE_DONE) {
3997 if (node->pending_strong_ref == 0) {
3998 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3999 proc->pid, thread->pid,
4000 node->debug_id);
4001 binder_node_inner_unlock(node);
4002 binder_put_node(node);
4003 break;
4004 }
4005 node->pending_strong_ref = 0;
4006 } else {
4007 if (node->pending_weak_ref == 0) {
4008 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4009 proc->pid, thread->pid,
4010 node->debug_id);
4011 binder_node_inner_unlock(node);
4012 binder_put_node(node);
4013 break;
4014 }
4015 node->pending_weak_ref = 0;
4016 }
4017 free_node = binder_dec_node_nilocked(node,
4018 cmd == BC_ACQUIRE_DONE, 0);
4019 WARN_ON(free_node);
4020 binder_debug(BINDER_DEBUG_USER_REFS,
4021 "%d:%d %s node %d ls %d lw %d tr %d\n",
4022 proc->pid, thread->pid,
4023 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4024 node->debug_id, node->local_strong_refs,
4025 node->local_weak_refs, node->tmp_refs);
4026 binder_node_inner_unlock(node);
4027 binder_put_node(node);
4028 break;
4029 }
4030 case BC_ATTEMPT_ACQUIRE:
4031 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4032 return -EINVAL;
4033 case BC_ACQUIRE_RESULT:
4034 pr_err("BC_ACQUIRE_RESULT not supported\n");
4035 return -EINVAL;
4036
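/*
 * BC_FREE_BUFFER returns ownership of a received transaction buffer
 * to the kernel once userspace is done with it. An illustrative (not
 * authoritative) sketch of the command stream userspace writes:
 *
 *	u32              cmd = BC_FREE_BUFFER;
 *	binder_uintptr_t ptr = tr.data.ptr.buffer; // from BR_TRANSACTION
 *
 * Freeing an async buffer may also release the next queued async
 * transaction for the same node (see binder_free_buf()).
 */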
4037 case BC_FREE_BUFFER: {
4038 binder_uintptr_t data_ptr;
4039 struct binder_buffer *buffer;
4040
4041 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4042 return -EFAULT;
4043 ptr += sizeof(binder_uintptr_t);
4044
4045 buffer = binder_alloc_prepare_to_free(&proc->alloc,
4046 data_ptr);
4047 if (IS_ERR_OR_NULL(buffer)) {
4048 if (PTR_ERR(buffer) == -EPERM) {
4049 binder_user_error(
4050 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4051 proc->pid, thread->pid,
4052 (u64)data_ptr);
4053 } else {
4054 binder_user_error(
4055 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
4056 proc->pid, thread->pid,
4057 (u64)data_ptr);
4058 }
4059 break;
4060 }
4061 binder_debug(BINDER_DEBUG_FREE_BUFFER,
4062 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4063 proc->pid, thread->pid, (u64)data_ptr,
4064 buffer->debug_id,
4065 buffer->transaction ? "active" : "finished");
4066 binder_free_buf(proc, thread, buffer, false);
4067 break;
4068 }
4069
4070 case BC_TRANSACTION_SG:
4071 case BC_REPLY_SG: {
4072 struct binder_transaction_data_sg tr;
4073
4074 if (copy_from_user(&tr, ptr, sizeof(tr)))
4075 return -EFAULT;
4076 ptr += sizeof(tr);
4077 binder_transaction(proc, thread, &tr.transaction_data,
4078 cmd == BC_REPLY_SG, tr.buffers_size);
4079 break;
4080 }
4081 case BC_TRANSACTION:
4082 case BC_REPLY: {
4083 struct binder_transaction_data tr;
4084
4085 if (copy_from_user(&tr, ptr, sizeof(tr)))
4086 return -EFAULT;
4087 ptr += sizeof(tr);
4088 binder_transaction(proc, thread, &tr,
4089 cmd == BC_REPLY, 0);
4090 break;
4091 }
4092
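/*
 * Looper protocol: a thread that enters the read loop of its own
 * accord writes BC_ENTER_LOOPER; a thread spawned in response to
 * BR_SPAWN_LOOPER must announce itself with BC_REGISTER_LOOPER so the
 * requested_threads accounting stays balanced. Mixing the two on one
 * thread is an error.
 */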
4093 case BC_REGISTER_LOOPER:
4094 binder_debug(BINDER_DEBUG_THREADS,
4095 "%d:%d BC_REGISTER_LOOPER\n",
4096 proc->pid, thread->pid);
4097 binder_inner_proc_lock(proc);
4098 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4099 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4100 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4101 proc->pid, thread->pid);
4102 } else if (proc->requested_threads == 0) {
4103 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4104 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4105 proc->pid, thread->pid);
4106 } else {
4107 proc->requested_threads--;
4108 proc->requested_threads_started++;
4109 }
4110 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4111 binder_inner_proc_unlock(proc);
4112 break;
4113 case BC_ENTER_LOOPER:
4114 binder_debug(BINDER_DEBUG_THREADS,
4115 "%d:%d BC_ENTER_LOOPER\n",
4116 proc->pid, thread->pid);
4117 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4118 thread->looper |= BINDER_LOOPER_STATE_INVALID;
4119 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4120 proc->pid, thread->pid);
4121 }
4122 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4123 break;
4124 case BC_EXIT_LOOPER:
4125 binder_debug(BINDER_DEBUG_THREADS,
4126 "%d:%d BC_EXIT_LOOPER\n",
4127 proc->pid, thread->pid);
4128 thread->looper |= BINDER_LOOPER_STATE_EXITED;
4129 break;
4130
4131 case BC_REQUEST_DEATH_NOTIFICATION:
4132 case BC_CLEAR_DEATH_NOTIFICATION: {
4133 uint32_t target;
4134 binder_uintptr_t cookie;
4135 struct binder_ref *ref;
4136 struct binder_ref_death *death = NULL;
4137
4138 if (get_user(target, (uint32_t __user *)ptr))
4139 return -EFAULT;
4140 ptr += sizeof(uint32_t);
4141 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4142 return -EFAULT;
4143 ptr += sizeof(binder_uintptr_t);
4144 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4145 /*
4146 * Allocate memory for death notification
4147 * before taking lock
4148 */
4149 death = kzalloc(sizeof(*death), GFP_KERNEL);
4150 if (death == NULL) {
4151 WARN_ON(thread->return_error.cmd !=
4152 BR_OK);
4153 thread->return_error.cmd = BR_ERROR;
4154 binder_enqueue_thread_work(
4155 thread,
4156 &thread->return_error.work);
4157 binder_debug(
4158 BINDER_DEBUG_FAILED_TRANSACTION,
4159 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4160 proc->pid, thread->pid);
4161 break;
4162 }
4163 }
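/*
 * For a request, the cookie is stored on the ref and, if the node's
 * process is already dead, a BINDER_WORK_DEAD_BINDER is queued right
 * away. For a clear, the cookie must match the one registered earlier.
 */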
4164 binder_proc_lock(proc);
4165 ref = binder_get_ref_olocked(proc, target, false);
4166 if (ref == NULL) {
4167 binder_user_error("%d:%d %s invalid ref %d\n",
4168 proc->pid, thread->pid,
4169 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4170 "BC_REQUEST_DEATH_NOTIFICATION" :
4171 "BC_CLEAR_DEATH_NOTIFICATION",
4172 target);
4173 binder_proc_unlock(proc);
4174 kfree(death);
4175 break;
4176 }
4177
4178 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4179 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4180 proc->pid, thread->pid,
4181 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4182 "BC_REQUEST_DEATH_NOTIFICATION" :
4183 "BC_CLEAR_DEATH_NOTIFICATION",
4184 (u64)cookie, ref->data.debug_id,
4185 ref->data.desc, ref->data.strong,
4186 ref->data.weak, ref->node->debug_id);
4187
4188 binder_node_lock(ref->node);
4189 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4190 if (ref->death) {
4191 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4192 proc->pid, thread->pid);
4193 binder_node_unlock(ref->node);
4194 binder_proc_unlock(proc);
4195 kfree(death);
4196 break;
4197 }
4198 binder_stats_created(BINDER_STAT_DEATH);
4199 INIT_LIST_HEAD(&death->work.entry);
4200 death->cookie = cookie;
4201 ref->death = death;
4202 if (ref->node->proc == NULL) {
4203 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4204
4205 binder_inner_proc_lock(proc);
4206 binder_enqueue_work_ilocked(
4207 &ref->death->work, &proc->todo);
4208 binder_wakeup_proc_ilocked(proc);
4209 binder_inner_proc_unlock(proc);
4210 }
4211 } else {
4212 if (ref->death == NULL) {
4213 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4214 proc->pid, thread->pid);
4215 binder_node_unlock(ref->node);
4216 binder_proc_unlock(proc);
4217 break;
4218 }
4219 death = ref->death;
4220 if (death->cookie != cookie) {
4221 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4222 proc->pid, thread->pid,
4223 (u64)death->cookie,
4224 (u64)cookie);
4225 binder_node_unlock(ref->node);
4226 binder_proc_unlock(proc);
4227 break;
4228 }
4229 ref->death = NULL;
4230 binder_inner_proc_lock(proc);
4231 if (list_empty(&death->work.entry)) {
4232 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4233 if (thread->looper &
4234 (BINDER_LOOPER_STATE_REGISTERED |
4235 BINDER_LOOPER_STATE_ENTERED))
4236 binder_enqueue_thread_work_ilocked(
4237 thread,
4238 &death->work);
4239 else {
4240 binder_enqueue_work_ilocked(
4241 &death->work,
4242 &proc->todo);
4243 binder_wakeup_proc_ilocked(
4244 proc);
4245 }
4246 } else {
4247 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4248 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4249 }
4250 binder_inner_proc_unlock(proc);
4251 }
4252 binder_node_unlock(ref->node);
4253 binder_proc_unlock(proc);
4254 } break;
4255 case BC_DEAD_BINDER_DONE: {
4256 struct binder_work *w;
4257 binder_uintptr_t cookie;
4258 struct binder_ref_death *death = NULL;
4259
4260 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4261 return -EFAULT;
4262
4263 ptr += sizeof(cookie);
4264 binder_inner_proc_lock(proc);
4265 list_for_each_entry(w, &proc->delivered_death,
4266 entry) {
4267 struct binder_ref_death *tmp_death =
4268 container_of(w,
4269 struct binder_ref_death,
4270 work);
4271
4272 if (tmp_death->cookie == cookie) {
4273 death = tmp_death;
4274 break;
4275 }
4276 }
4277 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4278 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4279 proc->pid, thread->pid, (u64)cookie,
4280 death);
4281 if (death == NULL) {
4282 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4283 proc->pid, thread->pid, (u64)cookie);
4284 binder_inner_proc_unlock(proc);
4285 break;
4286 }
4287 binder_dequeue_work_ilocked(&death->work);
4288 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4289 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4290 if (thread->looper &
4291 (BINDER_LOOPER_STATE_REGISTERED |
4292 BINDER_LOOPER_STATE_ENTERED))
4293 binder_enqueue_thread_work_ilocked(
4294 thread, &death->work);
4295 else {
4296 binder_enqueue_work_ilocked(
4297 &death->work,
4298 &proc->todo);
4299 binder_wakeup_proc_ilocked(proc);
4300 }
4301 }
4302 binder_inner_proc_unlock(proc);
4303 } break;
4304
4305 default:
4306 pr_err("%d:%d unknown command %u\n",
4307 proc->pid, thread->pid, cmd);
4308 return -EINVAL;
4309 }
4310 *consumed = ptr - buffer;
4311 }
4312 return 0;
4313 }
4314
4315 static void binder_stat_br(struct binder_proc *proc,
4316 struct binder_thread *thread, uint32_t cmd)
4317 {
4318 trace_binder_return(cmd);
4319 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4320 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4321 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4322 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4323 }
4324 }
4325
4326 static int binder_put_node_cmd(struct binder_proc *proc,
4327 struct binder_thread *thread,
4328 void __user **ptrp,
4329 binder_uintptr_t node_ptr,
4330 binder_uintptr_t node_cookie,
4331 int node_debug_id,
4332 uint32_t cmd, const char *cmd_name)
4333 {
4334 void __user *ptr = *ptrp;
4335
4336 if (put_user(cmd, (uint32_t __user *)ptr))
4337 return -EFAULT;
4338 ptr += sizeof(uint32_t);
4339
4340 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4341 return -EFAULT;
4342 ptr += sizeof(binder_uintptr_t);
4343
4344 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4345 return -EFAULT;
4346 ptr += sizeof(binder_uintptr_t);
4347
4348 binder_stat_br(proc, thread, cmd);
4349 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4350 proc->pid, thread->pid, cmd_name, node_debug_id,
4351 (u64)node_ptr, (u64)node_cookie);
4352
4353 *ptrp = ptr;
4354 return 0;
4355 }
4356
4357 static int binder_wait_for_work(struct binder_thread *thread,
4358 bool do_proc_work)
4359 {
4360 DEFINE_WAIT(wait);
4361 struct binder_proc *proc = thread->proc;
4362 int ret = 0;
4363
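/*
 * Sleep until this thread has work (or, when do_proc_work, until the
 * process does). TASK_FREEZABLE lets the freezer suspend us while we
 * sleep; the waiting_threads entry is re-added on each iteration so
 * binder_wakeup_proc_ilocked() can pick an idle thread.
 */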
4364 binder_inner_proc_lock(proc);
4365 for (;;) {
4366 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE);
4367 if (binder_has_work_ilocked(thread, do_proc_work))
4368 break;
4369 if (do_proc_work)
4370 list_add(&thread->waiting_thread_node,
4371 &proc->waiting_threads);
4372 binder_inner_proc_unlock(proc);
4373 schedule();
4374 binder_inner_proc_lock(proc);
4375 list_del_init(&thread->waiting_thread_node);
4376 if (signal_pending(current)) {
4377 ret = -EINTR;
4378 break;
4379 }
4380 }
4381 finish_wait(&thread->wait, &wait);
4382 binder_inner_proc_unlock(proc);
4383
4384 return ret;
4385 }
4386
4387 /**
4388 * binder_apply_fd_fixups() - finish fd translation
4389 * @proc: binder_proc associated with @t->buffer
4390 * @t: binder transaction with list of fd fixups
4391 *
4392 * Now that we are in the context of the transaction target
4393 * process, we can allocate and install fds. Process the
4394 * list of fds to translate and fixup the buffer with the
4395 * new fds first and only then install the files.
4396 *
4397 * If we fail to allocate an fd, skip the install and release
4398 * any fds that have already been allocated.
4399 */
4400 static int binder_apply_fd_fixups(struct binder_proc *proc,
4401 struct binder_transaction *t)
4402 {
4403 struct binder_txn_fd_fixup *fixup, *tmp;
4404 int ret = 0;
4405
4406 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4407 int fd = get_unused_fd_flags(O_CLOEXEC);
4408
4409 if (fd < 0) {
4410 binder_debug(BINDER_DEBUG_TRANSACTION,
4411 "failed fd fixup txn %d fd %d\n",
4412 t->debug_id, fd);
4413 ret = -ENOMEM;
4414 goto err;
4415 }
4416 binder_debug(BINDER_DEBUG_TRANSACTION,
4417 "fd fixup txn %d fd %d\n",
4418 t->debug_id, fd);
4419 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4420 fixup->target_fd = fd;
4421 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4422 fixup->offset, &fd,
4423 sizeof(u32))) {
4424 ret = -EINVAL;
4425 goto err;
4426 }
4427 }
4428 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4429 fd_install(fixup->target_fd, fixup->file);
4430 list_del(&fixup->fixup_entry);
4431 kfree(fixup);
4432 }
4433
4434 return ret;
4435
4436 err:
4437 binder_free_txn_fixups(t);
4438 return ret;
4439 }
4440
4441 static int binder_thread_read(struct binder_proc *proc,
4442 struct binder_thread *thread,
4443 binder_uintptr_t binder_buffer, size_t size,
4444 binder_size_t *consumed, int non_block)
4445 {
4446 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4447 void __user *ptr = buffer + *consumed;
4448 void __user *end = buffer + size;
4449
4450 int ret = 0;
4451 int wait_for_proc_work;
4452
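/*
 * A fresh read always starts with BR_NOOP so the buffer begins with a
 * valid command word even when no work ends up being queued.
 */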
4453 if (*consumed == 0) {
4454 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4455 return -EFAULT;
4456 ptr += sizeof(uint32_t);
4457 }
4458
4459 retry:
4460 binder_inner_proc_lock(proc);
4461 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4462 binder_inner_proc_unlock(proc);
4463
4464 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4465
4466 trace_binder_wait_for_work(wait_for_proc_work,
4467 !!thread->transaction_stack,
4468 !binder_worklist_empty(proc, &thread->todo));
4469 if (wait_for_proc_work) {
4470 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4471 BINDER_LOOPER_STATE_ENTERED))) {
4472 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4473 proc->pid, thread->pid, thread->looper);
4474 wait_event_interruptible(binder_user_error_wait,
4475 binder_stop_on_user_error < 2);
4476 }
4477 binder_set_nice(proc->default_priority);
4478 }
4479
4480 if (non_block) {
4481 if (!binder_has_work(thread, wait_for_proc_work))
4482 ret = -EAGAIN;
4483 } else {
4484 ret = binder_wait_for_work(thread, wait_for_proc_work);
4485 }
4486
4487 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4488
4489 if (ret)
4490 return ret;
4491
4492 while (1) {
4493 uint32_t cmd;
4494 struct binder_transaction_data_secctx tr;
4495 struct binder_transaction_data *trd = &tr.transaction_data;
4496 struct binder_work *w = NULL;
4497 struct list_head *list = NULL;
4498 struct binder_transaction *t = NULL;
4499 struct binder_thread *t_from;
4500 size_t trsize = sizeof(*trd);
4501
4502 binder_inner_proc_lock(proc);
4503 if (!binder_worklist_empty_ilocked(&thread->todo))
4504 list = &thread->todo;
4505 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4506 wait_for_proc_work)
4507 list = &proc->todo;
4508 else {
4509 binder_inner_proc_unlock(proc);
4510
4511 /* no data added */
4512 if (ptr - buffer == 4 && !thread->looper_need_return)
4513 goto retry;
4514 break;
4515 }
4516
4517 if (end - ptr < sizeof(tr) + 4) {
4518 binder_inner_proc_unlock(proc);
4519 break;
4520 }
4521 w = binder_dequeue_work_head_ilocked(list);
4522 if (binder_worklist_empty_ilocked(&thread->todo))
4523 thread->process_todo = false;
4524
4525 switch (w->type) {
4526 case BINDER_WORK_TRANSACTION: {
4527 binder_inner_proc_unlock(proc);
4528 t = container_of(w, struct binder_transaction, work);
4529 } break;
4530 case BINDER_WORK_RETURN_ERROR: {
4531 struct binder_error *e = container_of(
4532 w, struct binder_error, work);
4533
4534 WARN_ON(e->cmd == BR_OK);
4535 binder_inner_proc_unlock(proc);
4536 if (put_user(e->cmd, (uint32_t __user *)ptr))
4537 return -EFAULT;
4538 cmd = e->cmd;
4539 e->cmd = BR_OK;
4540 ptr += sizeof(uint32_t);
4541
4542 binder_stat_br(proc, thread, cmd);
4543 } break;
4544 case BINDER_WORK_TRANSACTION_COMPLETE:
4545 case BINDER_WORK_TRANSACTION_PENDING:
4546 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4547 if (proc->oneway_spam_detection_enabled &&
4548 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4549 cmd = BR_ONEWAY_SPAM_SUSPECT;
4550 else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
4551 cmd = BR_TRANSACTION_PENDING_FROZEN;
4552 else
4553 cmd = BR_TRANSACTION_COMPLETE;
4554 binder_inner_proc_unlock(proc);
4555 kfree(w);
4556 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4557 if (put_user(cmd, (uint32_t __user *)ptr))
4558 return -EFAULT;
4559 ptr += sizeof(uint32_t);
4560
4561 binder_stat_br(proc, thread, cmd);
4562 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4563 "%d:%d BR_TRANSACTION_COMPLETE\n",
4564 proc->pid, thread->pid);
4565 } break;
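/*
 * BINDER_WORK_NODE: recompute whether the node should hold strong and
 * weak userspace references, then emit the BR_INCREFS / BR_ACQUIRE /
 * BR_RELEASE / BR_DECREFS commands needed to move userspace to that
 * state; a node with no remaining refs is freed here.
 */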
4566 case BINDER_WORK_NODE: {
4567 struct binder_node *node = container_of(w, struct binder_node, work);
4568 int strong, weak;
4569 binder_uintptr_t node_ptr = node->ptr;
4570 binder_uintptr_t node_cookie = node->cookie;
4571 int node_debug_id = node->debug_id;
4572 int has_weak_ref;
4573 int has_strong_ref;
4574 void __user *orig_ptr = ptr;
4575
4576 BUG_ON(proc != node->proc);
4577 strong = node->internal_strong_refs ||
4578 node->local_strong_refs;
4579 weak = !hlist_empty(&node->refs) ||
4580 node->local_weak_refs ||
4581 node->tmp_refs || strong;
4582 has_strong_ref = node->has_strong_ref;
4583 has_weak_ref = node->has_weak_ref;
4584
4585 if (weak && !has_weak_ref) {
4586 node->has_weak_ref = 1;
4587 node->pending_weak_ref = 1;
4588 node->local_weak_refs++;
4589 }
4590 if (strong && !has_strong_ref) {
4591 node->has_strong_ref = 1;
4592 node->pending_strong_ref = 1;
4593 node->local_strong_refs++;
4594 }
4595 if (!strong && has_strong_ref)
4596 node->has_strong_ref = 0;
4597 if (!weak && has_weak_ref)
4598 node->has_weak_ref = 0;
4599 if (!weak && !strong) {
4600 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4601 "%d:%d node %d u%016llx c%016llx deleted\n",
4602 proc->pid, thread->pid,
4603 node_debug_id,
4604 (u64)node_ptr,
4605 (u64)node_cookie);
4606 rb_erase(&node->rb_node, &proc->nodes);
4607 binder_inner_proc_unlock(proc);
4608 binder_node_lock(node);
4609 /*
4610 * Acquire the node lock before freeing the
4611 * node to serialize with other threads that
4612 * may have been holding the node lock while
4613 * decrementing this node (avoids race where
4614 * this thread frees while the other thread
4615 * is unlocking the node after the final
4616 * decrement)
4617 */
4618 binder_node_unlock(node);
4619 binder_free_node(node);
4620 } else
4621 binder_inner_proc_unlock(proc);
4622
4623 if (weak && !has_weak_ref)
4624 ret = binder_put_node_cmd(
4625 proc, thread, &ptr, node_ptr,
4626 node_cookie, node_debug_id,
4627 BR_INCREFS, "BR_INCREFS");
4628 if (!ret && strong && !has_strong_ref)
4629 ret = binder_put_node_cmd(
4630 proc, thread, &ptr, node_ptr,
4631 node_cookie, node_debug_id,
4632 BR_ACQUIRE, "BR_ACQUIRE");
4633 if (!ret && !strong && has_strong_ref)
4634 ret = binder_put_node_cmd(
4635 proc, thread, &ptr, node_ptr,
4636 node_cookie, node_debug_id,
4637 BR_RELEASE, "BR_RELEASE");
4638 if (!ret && !weak && has_weak_ref)
4639 ret = binder_put_node_cmd(
4640 proc, thread, &ptr, node_ptr,
4641 node_cookie, node_debug_id,
4642 BR_DECREFS, "BR_DECREFS");
4643 if (orig_ptr == ptr)
4644 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4645 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4646 proc->pid, thread->pid,
4647 node_debug_id,
4648 (u64)node_ptr,
4649 (u64)node_cookie);
4650 if (ret)
4651 return ret;
4652 } break;
4653 case BINDER_WORK_DEAD_BINDER:
4654 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4655 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4656 struct binder_ref_death *death;
4657 uint32_t cmd;
4658 binder_uintptr_t cookie;
4659
4660 death = container_of(w, struct binder_ref_death, work);
4661 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4662 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4663 else
4664 cmd = BR_DEAD_BINDER;
4665 cookie = death->cookie;
4666
4667 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4668 "%d:%d %s %016llx\n",
4669 proc->pid, thread->pid,
4670 cmd == BR_DEAD_BINDER ?
4671 "BR_DEAD_BINDER" :
4672 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4673 (u64)cookie);
4674 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4675 binder_inner_proc_unlock(proc);
4676 kfree(death);
4677 binder_stats_deleted(BINDER_STAT_DEATH);
4678 } else {
4679 binder_enqueue_work_ilocked(
4680 w, &proc->delivered_death);
4681 binder_inner_proc_unlock(proc);
4682 }
4683 if (put_user(cmd, (uint32_t __user *)ptr))
4684 return -EFAULT;
4685 ptr += sizeof(uint32_t);
4686 if (put_user(cookie,
4687 (binder_uintptr_t __user *)ptr))
4688 return -EFAULT;
4689 ptr += sizeof(binder_uintptr_t);
4690 binder_stat_br(proc, thread, cmd);
4691 if (cmd == BR_DEAD_BINDER)
4692 goto done; /* DEAD_BINDER notifications can cause transactions */
4693 } break;
4694 default:
4695 binder_inner_proc_unlock(proc);
4696 pr_err("%d:%d: bad work type %d\n",
4697 proc->pid, thread->pid, w->type);
4698 break;
4699 }
4700
4701 if (!t)
4702 continue;
4703
4704 BUG_ON(t->buffer == NULL);
4705 if (t->buffer->target_node) {
4706 struct binder_node *target_node = t->buffer->target_node;
4707
4708 trd->target.ptr = target_node->ptr;
4709 trd->cookie = target_node->cookie;
4710 t->saved_priority = task_nice(current);
4711 if (t->priority < target_node->min_priority &&
4712 !(t->flags & TF_ONE_WAY))
4713 binder_set_nice(t->priority);
4714 else if (!(t->flags & TF_ONE_WAY) ||
4715 t->saved_priority > target_node->min_priority)
4716 binder_set_nice(target_node->min_priority);
4717 cmd = BR_TRANSACTION;
4718 } else {
4719 trd->target.ptr = 0;
4720 trd->cookie = 0;
4721 cmd = BR_REPLY;
4722 }
4723 trd->code = t->code;
4724 trd->flags = t->flags;
4725 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4726
4727 t_from = binder_get_txn_from(t);
4728 if (t_from) {
4729 struct task_struct *sender = t_from->proc->tsk;
4730
4731 trd->sender_pid =
4732 task_tgid_nr_ns(sender,
4733 task_active_pid_ns(current));
4734 #ifdef CONFIG_BINDER_SENDER_INFO
4735 binder_inner_proc_lock(thread->proc);
4736 thread->sender_pid_nr = task_tgid_nr(sender);
4737 binder_inner_proc_unlock(thread->proc);
4738 #endif
4739 } else {
4740 trd->sender_pid = 0;
4741 #ifdef CONFIG_BINDER_SENDER_INFO
4742 binder_inner_proc_lock(thread->proc);
4743 thread->sender_pid_nr = 0;
4744 binder_inner_proc_unlock(thread->proc);
4745 #endif
4746 }
4747
4748 ret = binder_apply_fd_fixups(proc, t);
4749 if (ret) {
4750 struct binder_buffer *buffer = t->buffer;
4751 bool oneway = !!(t->flags & TF_ONE_WAY);
4752 int tid = t->debug_id;
4753
4754 if (t_from)
4755 binder_thread_dec_tmpref(t_from);
4756 buffer->transaction = NULL;
4757 binder_cleanup_transaction(t, "fd fixups failed",
4758 BR_FAILED_REPLY);
4759 binder_free_buf(proc, thread, buffer, true);
4760 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4761 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4762 proc->pid, thread->pid,
4763 oneway ? "async " :
4764 (cmd == BR_REPLY ? "reply " : ""),
4765 tid, BR_FAILED_REPLY, ret, __LINE__);
4766 if (cmd == BR_REPLY) {
4767 cmd = BR_FAILED_REPLY;
4768 if (put_user(cmd, (uint32_t __user *)ptr))
4769 return -EFAULT;
4770 ptr += sizeof(uint32_t);
4771 binder_stat_br(proc, thread, cmd);
4772 break;
4773 }
4774 continue;
4775 }
4776 trd->data_size = t->buffer->data_size;
4777 trd->offsets_size = t->buffer->offsets_size;
4778 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4779 trd->data.ptr.offsets = trd->data.ptr.buffer +
4780 ALIGN(t->buffer->data_size,
4781 sizeof(void *));
4782
4783 tr.secctx = t->security_ctx;
4784 if (t->security_ctx) {
4785 cmd = BR_TRANSACTION_SEC_CTX;
4786 trsize = sizeof(tr);
4787 }
4788 if (put_user(cmd, (uint32_t __user *)ptr)) {
4789 if (t_from)
4790 binder_thread_dec_tmpref(t_from);
4791
4792 binder_cleanup_transaction(t, "put_user failed",
4793 BR_FAILED_REPLY);
4794
4795 return -EFAULT;
4796 }
4797 ptr += sizeof(uint32_t);
4798 if (copy_to_user(ptr, &tr, trsize)) {
4799 if (t_from)
4800 binder_thread_dec_tmpref(t_from);
4801
4802 binder_cleanup_transaction(t, "copy_to_user failed",
4803 BR_FAILED_REPLY);
4804
4805 return -EFAULT;
4806 }
4807 ptr += trsize;
4808
4809 trace_binder_transaction_received(t);
4810 binder_stat_br(proc, thread, cmd);
4811 binder_debug(BINDER_DEBUG_TRANSACTION,
4812 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4813 proc->pid, thread->pid,
4814 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4815 (cmd == BR_TRANSACTION_SEC_CTX) ?
4816 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4817 t->debug_id, t_from ? t_from->proc->pid : 0,
4818 t_from ? t_from->pid : 0, cmd,
4819 t->buffer->data_size, t->buffer->offsets_size,
4820 (u64)trd->data.ptr.buffer,
4821 (u64)trd->data.ptr.offsets);
4822
4823 if (t_from)
4824 binder_thread_dec_tmpref(t_from);
4825 t->buffer->allow_user_free = 1;
4826 #ifdef CONFIG_ACCESS_TOKENID
4827 binder_inner_proc_lock(thread->proc);
4828 thread->tokens.sender_tokenid = t->sender_tokenid;
4829 thread->tokens.first_tokenid = t->first_tokenid;
4830 binder_inner_proc_unlock(thread->proc);
4831 #endif /* CONFIG_ACCESS_TOKENID */
4832 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4833 binder_inner_proc_lock(thread->proc);
4834 t->to_parent = thread->transaction_stack;
4835 t->to_thread = thread;
4836 thread->transaction_stack = t;
4837 binder_inner_proc_unlock(thread->proc);
4838 } else {
4839 binder_free_transaction(t);
4840 }
4841 break;
4842 }
4843
4844 done:
4845
4846 *consumed = ptr - buffer;
4847 binder_inner_proc_lock(proc);
4848 if (proc->requested_threads == 0 &&
4849 list_empty(&thread->proc->waiting_threads) &&
4850 proc->requested_threads_started < proc->max_threads &&
4851 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4852 BINDER_LOOPER_STATE_ENTERED))
4853 /* user-space fails to spawn a new thread if we leave this out */) {
4854 proc->requested_threads++;
4855 binder_inner_proc_unlock(proc);
4856 binder_debug(BINDER_DEBUG_THREADS,
4857 "%d:%d BR_SPAWN_LOOPER\n",
4858 proc->pid, thread->pid);
4859 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4860 return -EFAULT;
4861 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4862 } else
4863 binder_inner_proc_unlock(proc);
4864 return 0;
4865 }
4866
4867 static void binder_release_work(struct binder_proc *proc,
4868 struct list_head *list)
4869 {
4870 struct binder_work *w;
4871 enum binder_work_type wtype;
4872
4873 while (1) {
4874 binder_inner_proc_lock(proc);
4875 w = binder_dequeue_work_head_ilocked(list);
4876 wtype = w ? w->type : 0;
4877 binder_inner_proc_unlock(proc);
4878 if (!w)
4879 return;
4880
4881 switch (wtype) {
4882 case BINDER_WORK_TRANSACTION: {
4883 struct binder_transaction *t;
4884
4885 t = container_of(w, struct binder_transaction, work);
4886
4887 binder_cleanup_transaction(t, "process died.",
4888 BR_DEAD_REPLY);
4889 } break;
4890 case BINDER_WORK_RETURN_ERROR: {
4891 struct binder_error *e = container_of(
4892 w, struct binder_error, work);
4893
4894 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4895 "undelivered TRANSACTION_ERROR: %u\n",
4896 e->cmd);
4897 } break;
4898 case BINDER_WORK_TRANSACTION_PENDING:
4899 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
4900 case BINDER_WORK_TRANSACTION_COMPLETE: {
4901 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4902 "undelivered TRANSACTION_COMPLETE\n");
4903 kfree(w);
4904 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4905 } break;
4906 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4907 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4908 struct binder_ref_death *death;
4909
4910 death = container_of(w, struct binder_ref_death, work);
4911 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4912 "undelivered death notification, %016llx\n",
4913 (u64)death->cookie);
4914 kfree(death);
4915 binder_stats_deleted(BINDER_STAT_DEATH);
4916 } break;
4917 case BINDER_WORK_NODE:
4918 break;
4919 default:
4920 pr_err("unexpected work type, %d, not freed\n",
4921 wtype);
4922 break;
4923 }
4924 }
4925
4926 }
4927
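/*
 * Threads live in proc->threads, an rbtree keyed by thread pid. Look
 * up the entry for current; if it is absent and the caller
 * preallocated @new_thread, insert and initialize that instead.
 */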
4928 static struct binder_thread *binder_get_thread_ilocked(
4929 struct binder_proc *proc, struct binder_thread *new_thread)
4930 {
4931 struct binder_thread *thread = NULL;
4932 struct rb_node *parent = NULL;
4933 struct rb_node **p = &proc->threads.rb_node;
4934
4935 while (*p) {
4936 parent = *p;
4937 thread = rb_entry(parent, struct binder_thread, rb_node);
4938
4939 if (current->pid < thread->pid)
4940 p = &(*p)->rb_left;
4941 else if (current->pid > thread->pid)
4942 p = &(*p)->rb_right;
4943 else
4944 return thread;
4945 }
4946 if (!new_thread)
4947 return NULL;
4948 thread = new_thread;
4949 binder_stats_created(BINDER_STAT_THREAD);
4950 thread->proc = proc;
4951 thread->pid = current->pid;
4952 atomic_set(&thread->tmp_ref, 0);
4953 init_waitqueue_head(&thread->wait);
4954 INIT_LIST_HEAD(&thread->todo);
4955 rb_link_node(&thread->rb_node, parent, p);
4956 rb_insert_color(&thread->rb_node, &proc->threads);
4957 thread->looper_need_return = true;
4958 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4959 thread->return_error.cmd = BR_OK;
4960 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4961 thread->reply_error.cmd = BR_OK;
4962 thread->ee.command = BR_OK;
4963 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4964 return thread;
4965 }
4966
4967 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4968 {
4969 struct binder_thread *thread;
4970 struct binder_thread *new_thread;
4971
4972 binder_inner_proc_lock(proc);
4973 thread = binder_get_thread_ilocked(proc, NULL);
4974 binder_inner_proc_unlock(proc);
4975 if (!thread) {
4976 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4977 if (new_thread == NULL)
4978 return NULL;
4979 binder_inner_proc_lock(proc);
4980 thread = binder_get_thread_ilocked(proc, new_thread);
4981 binder_inner_proc_unlock(proc);
4982 if (thread != new_thread)
4983 kfree(new_thread);
4984 }
4985 return thread;
4986 }
4987
4988 static void binder_free_proc(struct binder_proc *proc)
4989 {
4990 struct binder_device *device;
4991
4992 BUG_ON(!list_empty(&proc->todo));
4993 BUG_ON(!list_empty(&proc->delivered_death));
4994 if (proc->outstanding_txns)
4995 pr_warn("%s: Unexpected outstanding_txns %d\n",
4996 __func__, proc->outstanding_txns);
4997 device = container_of(proc->context, struct binder_device, context);
4998 if (refcount_dec_and_test(&device->ref)) {
4999 kfree(proc->context->name);
5000 kfree(device);
5001 }
5002 binder_alloc_deferred_release(&proc->alloc);
5003 put_task_struct(proc->tsk);
5004 put_cred(proc->cred);
5005 binder_stats_deleted(BINDER_STAT_PROC);
5006 kfree(proc);
5007 }
5008
5009 static void binder_free_thread(struct binder_thread *thread)
5010 {
5011 BUG_ON(!list_empty(&thread->todo));
5012 binder_stats_deleted(BINDER_STAT_THREAD);
5013 binder_proc_dec_tmpref(thread->proc);
5014 kfree(thread);
5015 }
5016
5017 static int binder_thread_release(struct binder_proc *proc,
5018 struct binder_thread *thread)
5019 {
5020 struct binder_transaction *t;
5021 struct binder_transaction *send_reply = NULL;
5022 int active_transactions = 0;
5023 struct binder_transaction *last_t = NULL;
5024
5025 binder_inner_proc_lock(thread->proc);
5026 /*
5027 * take a ref on the proc so it survives
5028 * after we remove this thread from proc->threads.
5029 * The corresponding dec is when we actually
5030 * free the thread in binder_free_thread()
5031 */
5032 proc->tmp_ref++;
5033 /*
5034 * take a ref on this thread to ensure it
5035 * survives while we are releasing it
5036 */
5037 atomic_inc(&thread->tmp_ref);
5038 rb_erase(&thread->rb_node, &proc->threads);
5039 t = thread->transaction_stack;
5040 if (t) {
5041 spin_lock(&t->lock);
5042 if (t->to_thread == thread)
5043 send_reply = t;
5044 } else {
5045 __acquire(&t->lock);
5046 }
5047 thread->is_dead = true;
5048
5049 while (t) {
5050 last_t = t;
5051 active_transactions++;
5052 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5053 "release %d:%d transaction %d %s, still active\n",
5054 proc->pid, thread->pid,
5055 t->debug_id,
5056 (t->to_thread == thread) ? "in" : "out");
5057
5058 if (t->to_thread == thread) {
5059 thread->proc->outstanding_txns--;
5060 t->to_proc = NULL;
5061 t->to_thread = NULL;
5062 if (t->buffer) {
5063 t->buffer->transaction = NULL;
5064 t->buffer = NULL;
5065 }
5066 t = t->to_parent;
5067 } else if (t->from == thread) {
5068 t->from = NULL;
5069 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
5070 t->from_pid = -1;
5071 t->from_tid = -1;
5072 #endif
5073 t = t->from_parent;
5074 } else
5075 BUG();
5076 spin_unlock(&last_t->lock);
5077 if (t)
5078 spin_lock(&t->lock);
5079 else
5080 __acquire(&t->lock);
5081 }
5082 /* annotation for sparse, lock not acquired in last iteration above */
5083 __release(&t->lock);
5084
5085 /*
5086 * If this thread used poll, make sure we remove the waitqueue from any
5087 * poll data structures holding it.
5088 */
5089 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5090 wake_up_pollfree(&thread->wait);
5091
5092 binder_inner_proc_unlock(thread->proc);
5093
5094 /*
5095 * This is needed to avoid races between wake_up_pollfree() above and
5096 * someone else removing the last entry from the queue for other reasons
5097 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5098 * descriptor being closed). Such other users hold an RCU read lock, so
5099 * we can be sure they're done after we call synchronize_rcu().
5100 */
5101 if (thread->looper & BINDER_LOOPER_STATE_POLL)
5102 synchronize_rcu();
5103
5104 if (send_reply)
5105 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5106 binder_release_work(proc, &thread->todo);
5107 binder_thread_dec_tmpref(thread);
5108 return active_transactions;
5109 }
5110
5111 static __poll_t binder_poll(struct file *filp,
5112 struct poll_table_struct *wait)
5113 {
5114 struct binder_proc *proc = filp->private_data;
5115 struct binder_thread *thread = NULL;
5116 bool wait_for_proc_work;
5117
5118 thread = binder_get_thread(proc);
5119 if (!thread)
5120 return EPOLLERR;
5121
5122 binder_inner_proc_lock(thread->proc);
5123 thread->looper |= BINDER_LOOPER_STATE_POLL;
5124 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5125
5126 binder_inner_proc_unlock(thread->proc);
5127
5128 poll_wait(filp, &thread->wait, wait);
5129
5130 if (binder_has_work(thread, wait_for_proc_work))
5131 return EPOLLIN;
5132
5133 return 0;
5134 }
5135
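/*
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ
 *
 * Copies a struct binder_write_read from userspace, drains the write
 * buffer through binder_thread_write(), fills the read buffer through
 * binder_thread_read(), then copies the struct back so userspace can
 * see the consumed counts.
 *
 * Illustrative userspace call sequence (a sketch only; the buffer and
 * fd names are placeholders, not taken from any particular libbinder):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)out_cmds,
 *		.write_size   = out_len,
 *		.read_buffer  = (binder_uintptr_t)in_buf,
 *		.read_size    = sizeof(in_buf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed / bwr.read_consumed now report progress
 */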
5136 static int binder_ioctl_write_read(struct file *filp,
5137 unsigned int cmd, unsigned long arg,
5138 struct binder_thread *thread)
5139 {
5140 int ret = 0;
5141 struct binder_proc *proc = filp->private_data;
5142 unsigned int size = _IOC_SIZE(cmd);
5143 void __user *ubuf = (void __user *)arg;
5144 struct binder_write_read bwr;
5145
5146 if (size != sizeof(struct binder_write_read)) {
5147 ret = -EINVAL;
5148 goto out;
5149 }
5150
5151 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5152 ret = -EFAULT;
5153 goto out;
5154 }
5155 binder_debug(BINDER_DEBUG_READ_WRITE,
5156 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5157 proc->pid, thread->pid,
5158 (u64)bwr.write_size, (u64)bwr.write_buffer,
5159 (u64)bwr.read_size, (u64)bwr.read_buffer);
5160
5161 if (bwr.write_size > 0) {
5162 ret = binder_thread_write(proc, thread,
5163 bwr.write_buffer,
5164 bwr.write_size,
5165 &bwr.write_consumed);
5166 trace_binder_write_done(ret);
5167 if (ret < 0) {
5168 bwr.read_consumed = 0;
5169 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5170 ret = -EFAULT;
5171 goto out;
5172 }
5173 }
5174 if (bwr.read_size > 0) {
5175 ret = binder_thread_read(proc, thread, bwr.read_buffer,
5176 bwr.read_size,
5177 &bwr.read_consumed,
5178 filp->f_flags & O_NONBLOCK);
5179 trace_binder_read_done(ret);
5180 binder_inner_proc_lock(proc);
5181 if (!binder_worklist_empty_ilocked(&proc->todo))
5182 binder_wakeup_proc_ilocked(proc);
5183 binder_inner_proc_unlock(proc);
5184 if (ret < 0) {
5185 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5186 ret = -EFAULT;
5187 goto out;
5188 }
5189 }
5190 binder_debug(BINDER_DEBUG_READ_WRITE,
5191 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5192 proc->pid, thread->pid,
5193 (u64)bwr.write_consumed, (u64)bwr.write_size,
5194 (u64)bwr.read_consumed, (u64)bwr.read_size);
5195 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5196 ret = -EFAULT;
5197 goto out;
5198 }
5199 out:
5200 return ret;
5201 }
5202
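/*
 * binder_ioctl_set_ctx_mgr() - register the context manager node
 *
 * Only one context manager may exist per context, registration must
 * pass the LSM check, and if a manager uid was configured earlier the
 * caller's euid must match it. @fbo is NULL for the legacy
 * BINDER_SET_CONTEXT_MGR command and non-NULL for
 * BINDER_SET_CONTEXT_MGR_EXT.
 */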
5203 static int binder_ioctl_set_ctx_mgr(struct file *filp,
5204 struct flat_binder_object *fbo)
5205 {
5206 int ret = 0;
5207 struct binder_proc *proc = filp->private_data;
5208 struct binder_context *context = proc->context;
5209 struct binder_node *new_node;
5210 kuid_t curr_euid = current_euid();
5211
5212 mutex_lock(&context->context_mgr_node_lock);
5213 if (context->binder_context_mgr_node) {
5214 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5215 ret = -EBUSY;
5216 goto out;
5217 }
5218 ret = security_binder_set_context_mgr(proc->cred);
5219 if (ret < 0)
5220 goto out;
5221 if (uid_valid(context->binder_context_mgr_uid)) {
5222 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5223 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5224 from_kuid(&init_user_ns, curr_euid),
5225 from_kuid(&init_user_ns,
5226 context->binder_context_mgr_uid));
5227 ret = -EPERM;
5228 goto out;
5229 }
5230 } else {
5231 context->binder_context_mgr_uid = curr_euid;
5232 }
5233 new_node = binder_new_node(proc, fbo);
5234 if (!new_node) {
5235 ret = -ENOMEM;
5236 goto out;
5237 }
5238 binder_node_lock(new_node);
5239 new_node->local_weak_refs++;
5240 new_node->local_strong_refs++;
5241 new_node->has_strong_ref = 1;
5242 new_node->has_weak_ref = 1;
5243 context->binder_context_mgr_node = new_node;
5244 binder_node_unlock(new_node);
5245 binder_put_node(new_node);
5246 out:
5247 mutex_unlock(&context->context_mgr_node_lock);
5248 return ret;
5249 }
5250
5251 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5252 struct binder_node_info_for_ref *info)
5253 {
5254 struct binder_node *node;
5255 struct binder_context *context = proc->context;
5256 __u32 handle = info->handle;
5257
5258 if (info->strong_count || info->weak_count || info->reserved1 ||
5259 info->reserved2 || info->reserved3) {
5260 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5261 proc->pid);
5262 return -EINVAL;
5263 }
5264
5265 /* This ioctl may only be used by the context manager */
5266 mutex_lock(&context->context_mgr_node_lock);
5267 if (!context->binder_context_mgr_node ||
5268 context->binder_context_mgr_node->proc != proc) {
5269 mutex_unlock(&context->context_mgr_node_lock);
5270 return -EPERM;
5271 }
5272 mutex_unlock(&context->context_mgr_node_lock);
5273
5274 node = binder_get_node_from_ref(proc, handle, true, NULL);
5275 if (!node)
5276 return -EINVAL;
5277
5278 info->strong_count = node->local_strong_refs +
5279 node->internal_strong_refs;
5280 info->weak_count = node->local_weak_refs;
5281
5282 binder_put_node(node);
5283
5284 return 0;
5285 }
5286
5287 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5288 struct binder_node_debug_info *info)
5289 {
5290 struct rb_node *n;
5291 binder_uintptr_t ptr = info->ptr;
5292
5293 memset(info, 0, sizeof(*info));
5294
5295 binder_inner_proc_lock(proc);
5296 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5297 struct binder_node *node = rb_entry(n, struct binder_node,
5298 rb_node);
5299 if (node->ptr > ptr) {
5300 info->ptr = node->ptr;
5301 info->cookie = node->cookie;
5302 info->has_strong_ref = node->has_strong_ref;
5303 info->has_weak_ref = node->has_weak_ref;
5304 break;
5305 }
5306 }
5307 binder_inner_proc_unlock(proc);
5308
5309 return 0;
5310 }
5311
5312 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5313 {
5314 struct rb_node *n;
5315 struct binder_thread *thread;
5316
5317 if (proc->outstanding_txns > 0)
5318 return true;
5319
5320 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5321 thread = rb_entry(n, struct binder_thread, rb_node);
5322 if (thread->transaction_stack)
5323 return true;
5324 }
5325 return false;
5326 }
5327
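/*
 * binder_ioctl_freeze() - freeze or unfreeze one binder_proc
 *
 * Unfreezing simply clears the frozen state. Freezing marks the
 * process frozen first so no new transactions are accepted, optionally
 * waits up to info->timeout_ms for outstanding transactions to drain,
 * and rolls the frozen state back if the wait fails or transactions
 * are still pending (-EAGAIN).
 *
 * Illustrative caller-side use (a sketch; field values are examples
 * only):
 *
 *	struct binder_freeze_info info = {
 *		.pid        = target_pid,
 *		.enable     = 1,
 *		.timeout_ms = 100,
 *	};
 *	ioctl(binder_fd, BINDER_FREEZE, &info);
 */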
5328 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5329 struct binder_proc *target_proc)
5330 {
5331 int ret = 0;
5332
5333 if (!info->enable) {
5334 binder_inner_proc_lock(target_proc);
5335 target_proc->sync_recv = false;
5336 target_proc->async_recv = false;
5337 target_proc->is_frozen = false;
5338 binder_inner_proc_unlock(target_proc);
5339 return 0;
5340 }
5341
5342 /*
5343 * Freezing the target. Prevent new transactions by
5344 * setting frozen state. If timeout specified, wait
5345 * for transactions to drain.
5346 */
5347 binder_inner_proc_lock(target_proc);
5348 target_proc->sync_recv = false;
5349 target_proc->async_recv = false;
5350 target_proc->is_frozen = true;
5351 binder_inner_proc_unlock(target_proc);
5352
5353 if (info->timeout_ms > 0)
5354 ret = wait_event_interruptible_timeout(
5355 target_proc->freeze_wait,
5356 (!target_proc->outstanding_txns),
5357 msecs_to_jiffies(info->timeout_ms));
5358
5359 /* Check pending transactions that wait for reply */
5360 if (ret >= 0) {
5361 binder_inner_proc_lock(target_proc);
5362 if (binder_txns_pending_ilocked(target_proc))
5363 ret = -EAGAIN;
5364 binder_inner_proc_unlock(target_proc);
5365 }
5366
5367 if (ret < 0) {
5368 binder_inner_proc_lock(target_proc);
5369 target_proc->is_frozen = false;
5370 binder_inner_proc_unlock(target_proc);
5371 }
5372
5373 return ret;
5374 }
5375
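/*
 * binder_ioctl_get_freezer_info() - report frozen-state activity
 *
 * Aggregates over every binder_proc with the requested pid (there can
 * be one per context). Bit 0 of info->sync_recv reports a sync
 * transaction received while frozen, bit 1 reports transactions still
 * pending; info->async_recv reports a oneway transaction received
 * while frozen.
 */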
5376 static int binder_ioctl_get_freezer_info(
5377 struct binder_frozen_status_info *info)
5378 {
5379 struct binder_proc *target_proc;
5380 bool found = false;
5381 __u32 txns_pending;
5382
5383 info->sync_recv = 0;
5384 info->async_recv = 0;
5385
5386 mutex_lock(&binder_procs_lock);
5387 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5388 if (target_proc->pid == info->pid) {
5389 found = true;
5390 binder_inner_proc_lock(target_proc);
5391 txns_pending = binder_txns_pending_ilocked(target_proc);
5392 info->sync_recv |= target_proc->sync_recv |
5393 (txns_pending << 1);
5394 info->async_recv |= target_proc->async_recv;
5395 binder_inner_proc_unlock(target_proc);
5396 }
5397 }
5398 mutex_unlock(&binder_procs_lock);
5399
5400 if (!found)
5401 return -EINVAL;
5402
5403 return 0;
5404 }
5405
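/*
 * binder_ioctl_get_extended_error() - return and clear the thread's
 * pending extended error
 *
 * The snapshot and reset of thread->ee happen under proc->inner_lock
 * so a concurrent writer cannot tear the struct; the copy to userspace
 * is done after the lock is dropped.
 */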
5406 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5407 void __user *ubuf)
5408 {
5409 struct binder_extended_error ee;
5410
5411 binder_inner_proc_lock(thread->proc);
5412 ee = thread->ee;
5413 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5414 binder_inner_proc_unlock(thread->proc);
5415
5416 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5417 return -EFAULT;
5418
5419 return 0;
5420 }
5421
5422 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5423 {
5424 int ret;
5425 struct binder_proc *proc = filp->private_data;
5426 struct binder_thread *thread;
5427 unsigned int size = _IOC_SIZE(cmd);
5428 void __user *ubuf = (void __user *)arg;
5429
5430 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5431 proc->pid, current->pid, cmd, arg);*/
5432
5433 binder_selftest_alloc(&proc->alloc);
5434
5435 trace_binder_ioctl(cmd, arg);
5436
5437 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5438 if (ret)
5439 goto err_unlocked;
5440
5441 thread = binder_get_thread(proc);
5442 if (thread == NULL) {
5443 ret = -ENOMEM;
5444 goto err;
5445 }
5446
5447 switch (cmd) {
5448 case BINDER_WRITE_READ:
5449 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5450 if (ret)
5451 goto err;
5452 break;
5453 case BINDER_SET_MAX_THREADS: {
5454 int max_threads;
5455
5456 if (copy_from_user(&max_threads, ubuf,
5457 sizeof(max_threads))) {
5458 ret = -EINVAL;
5459 goto err;
5460 }
5461 binder_inner_proc_lock(proc);
5462 proc->max_threads = max_threads;
5463 binder_inner_proc_unlock(proc);
5464 break;
5465 }
5466 case BINDER_SET_CONTEXT_MGR_EXT: {
5467 struct flat_binder_object fbo;
5468
5469 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5470 ret = -EINVAL;
5471 goto err;
5472 }
5473 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5474 if (ret)
5475 goto err;
5476 break;
5477 }
5478 case BINDER_SET_CONTEXT_MGR:
5479 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5480 if (ret)
5481 goto err;
5482 break;
5483 case BINDER_THREAD_EXIT:
5484 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5485 proc->pid, thread->pid);
5486 binder_thread_release(proc, thread);
5487 thread = NULL;
5488 break;
5489 case BINDER_VERSION: {
5490 struct binder_version __user *ver = ubuf;
5491
5492 if (size != sizeof(struct binder_version)) {
5493 ret = -EINVAL;
5494 goto err;
5495 }
5496 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5497 &ver->protocol_version)) {
5498 ret = -EINVAL;
5499 goto err;
5500 }
5501 break;
5502 }
5503 case BINDER_GET_NODE_INFO_FOR_REF: {
5504 struct binder_node_info_for_ref info;
5505
5506 if (copy_from_user(&info, ubuf, sizeof(info))) {
5507 ret = -EFAULT;
5508 goto err;
5509 }
5510
5511 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5512 if (ret < 0)
5513 goto err;
5514
5515 if (copy_to_user(ubuf, &info, sizeof(info))) {
5516 ret = -EFAULT;
5517 goto err;
5518 }
5519
5520 break;
5521 }
5522 case BINDER_GET_NODE_DEBUG_INFO: {
5523 struct binder_node_debug_info info;
5524
5525 if (copy_from_user(&info, ubuf, sizeof(info))) {
5526 ret = -EFAULT;
5527 goto err;
5528 }
5529
5530 ret = binder_ioctl_get_node_debug_info(proc, &info);
5531 if (ret < 0)
5532 goto err;
5533
5534 if (copy_to_user(ubuf, &info, sizeof(info))) {
5535 ret = -EFAULT;
5536 goto err;
5537 }
5538 break;
5539 }
5540 case BINDER_FREEZE: {
5541 struct binder_freeze_info info;
5542 struct binder_proc **target_procs = NULL, *target_proc;
5543 int target_procs_count = 0, i = 0;
5544
5545 ret = 0;
5546
5547 if (copy_from_user(&info, ubuf, sizeof(info))) {
5548 ret = -EFAULT;
5549 goto err;
5550 }
5551
5552 mutex_lock(&binder_procs_lock);
5553 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5554 if (target_proc->pid == info.pid)
5555 target_procs_count++;
5556 }
5557
5558 if (target_procs_count == 0) {
5559 mutex_unlock(&binder_procs_lock);
5560 ret = -EINVAL;
5561 goto err;
5562 }
5563
5564 target_procs = kcalloc(target_procs_count,
5565 sizeof(struct binder_proc *),
5566 GFP_KERNEL);
5567
5568 if (!target_procs) {
5569 mutex_unlock(&binder_procs_lock);
5570 ret = -ENOMEM;
5571 goto err;
5572 }
5573
5574 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5575 if (target_proc->pid != info.pid)
5576 continue;
5577
5578 binder_inner_proc_lock(target_proc);
5579 target_proc->tmp_ref++;
5580 binder_inner_proc_unlock(target_proc);
5581
5582 target_procs[i++] = target_proc;
5583 }
5584 mutex_unlock(&binder_procs_lock);
5585
5586 for (i = 0; i < target_procs_count; i++) {
5587 if (ret >= 0)
5588 ret = binder_ioctl_freeze(&info,
5589 target_procs[i]);
5590
5591 binder_proc_dec_tmpref(target_procs[i]);
5592 }
5593
5594 kfree(target_procs);
5595
5596 if (ret < 0)
5597 goto err;
5598 break;
5599 }
5600 case BINDER_GET_FROZEN_INFO: {
5601 struct binder_frozen_status_info info;
5602
5603 if (copy_from_user(&info, ubuf, sizeof(info))) {
5604 ret = -EFAULT;
5605 goto err;
5606 }
5607
5608 ret = binder_ioctl_get_freezer_info(&info);
5609 if (ret < 0)
5610 goto err;
5611
5612 if (copy_to_user(ubuf, &info, sizeof(info))) {
5613 ret = -EFAULT;
5614 goto err;
5615 }
5616 break;
5617 }
5618 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5619 uint32_t enable;
5620
5621 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5622 ret = -EFAULT;
5623 goto err;
5624 }
5625 binder_inner_proc_lock(proc);
5626 proc->oneway_spam_detection_enabled = (bool)enable;
5627 binder_inner_proc_unlock(proc);
5628 break;
5629 }
5630 case BINDER_GET_EXTENDED_ERROR:
5631 ret = binder_ioctl_get_extended_error(thread, ubuf);
5632 if (ret < 0)
5633 goto err;
5634 break;
5635 case BINDER_FEATURE_SET: {
5636 struct binder_feature_set __user *features = ubuf;
5637
5638 if (size != sizeof(struct binder_feature_set)) {
5639 ret = -EINVAL;
5640 goto err;
5641 }
5642 if (put_user(BINDER_CURRENT_FEATURE_SET, &features->feature_set)) {
5643 ret = -EINVAL;
5644 goto err;
5645 }
5646 break;
5647 }
5648 #ifdef CONFIG_ACCESS_TOKENID
5649 case BINDER_GET_ACCESS_TOKEN: {
5650 struct access_token __user *tokens = ubuf;
5651 u64 token, ftoken;
5652
5653 if (size != sizeof(struct access_token)) {
5654 ret = -EINVAL;
5655 goto err;
5656 }
5657 binder_inner_proc_lock(proc);
5658 token = thread->tokens.sender_tokenid;
5659 ftoken = thread->tokens.first_tokenid;
5660 binder_inner_proc_unlock(proc);
5661 if (put_user(token, &tokens->sender_tokenid)) {
5662 ret = -EINVAL;
5663 goto err;
5664 }
5665 if (put_user(ftoken, &tokens->first_tokenid)) {
5666 ret = -EINVAL;
5667 goto err;
5668 }
5669 break;
5670 }
5671 #endif /* CONFIG_ACCESS_TOKENID */
5672
5673 #ifdef CONFIG_BINDER_SENDER_INFO
5674 case BINDER_GET_SENDER_INFO: {
5675 struct binder_sender_info __user *sender = ubuf;
5676 u64 token, ftoken, sender_pid_nr;
5677 if (size != sizeof(struct binder_sender_info)) {
5678 ret = -EINVAL;
5679 goto err;
5680 }
5681 binder_inner_proc_lock(proc);
5682 #ifdef CONFIG_ACCESS_TOKENID
5683 token = thread->tokens.sender_tokenid;
5684 ftoken = thread->tokens.first_tokenid;
5685 #endif /*CONFIG_ACCESS_TOKENID*/
5686 sender_pid_nr = thread->sender_pid_nr;
5687 binder_inner_proc_unlock(proc);
5688 #ifdef CONFIG_ACCESS_TOKENID
5689 if (put_user(token, &sender->tokens.sender_tokenid)) {
5690 ret = -EFAULT;
5691 goto err;
5692 }
5693 if (put_user(ftoken, &sender->tokens.first_tokenid)) {
5694 ret = -EFAULT;
5695 goto err;
5696 }
5697 #endif /*CONFIG_ACCESS_TOKENID*/
5698 if (put_user(sender_pid_nr, &sender->sender_pid_nr)) {
5699 ret = -EFAULT;
5700 goto err;
5701 }
5702 break;
5703 }
5704 #endif /* CONFIG_BINDER_SENDER_INFO */
5705 default:
5706 ret = -EINVAL;
5707 goto err;
5708 }
5709 ret = 0;
5710 err:
5711 if (thread)
5712 thread->looper_need_return = false;
5713 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5714 if (ret && ret != -EINTR)
5715 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5716 err_unlocked:
5717 trace_binder_ioctl_done(ret);
5718 return ret;
5719 }
5720
5721 static void binder_vma_open(struct vm_area_struct *vma)
5722 {
5723 struct binder_proc *proc = vma->vm_private_data;
5724
5725 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5726 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5727 proc->pid, vma->vm_start, vma->vm_end,
5728 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5729 (unsigned long)pgprot_val(vma->vm_page_prot));
5730 }
5731
5732 static void binder_vma_close(struct vm_area_struct *vma)
5733 {
5734 struct binder_proc *proc = vma->vm_private_data;
5735
5736 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5737 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5738 proc->pid, vma->vm_start, vma->vm_end,
5739 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5740 (unsigned long)pgprot_val(vma->vm_page_prot));
5741 binder_alloc_vma_close(&proc->alloc);
5742 }
5743
5744 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5745 {
5746 return VM_FAULT_SIGBUS;
5747 }
5748
5749 static const struct vm_operations_struct binder_vm_ops = {
5750 .open = binder_vma_open,
5751 .close = binder_vma_close,
5752 .fault = binder_vm_fault,
5753 };
5754
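/*
 * binder_mmap() - set up the userspace buffer mapping
 *
 * The mapping must come from the thread group that opened the fd, must
 * not be writable (FORBIDDEN_MMAP_FLAGS; VM_MAYWRITE is also cleared),
 * and is never copied on fork (VM_DONTCOPY). Page management is
 * delegated to binder_alloc_mmap_handler().
 */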
5755 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5756 {
5757 struct binder_proc *proc = filp->private_data;
5758
5759 if (proc->tsk != current->group_leader)
5760 return -EINVAL;
5761
5762 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5763 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5764 __func__, proc->pid, vma->vm_start, vma->vm_end,
5765 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5766 (unsigned long)pgprot_val(vma->vm_page_prot));
5767
5768 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5769 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5770 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5771 return -EPERM;
5772 }
5773 vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
5774
5775 vma->vm_ops = &binder_vm_ops;
5776 vma->vm_private_data = proc;
5777
5778 return binder_alloc_mmap_handler(&proc->alloc, vma);
5779 }
5780
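/*
 * binder_open() - create the per-process binder state
 *
 * Allocates a binder_proc tied to the opener's thread group, takes a
 * reference on the backing binder_device (binderfs stashes devices in
 * i_private), adds the proc to the global binder_procs list, and, for
 * the first open of a given PID, creates the debugfs and binderfs log
 * entries.
 */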
5781 static int binder_open(struct inode *nodp, struct file *filp)
5782 {
5783 struct binder_proc *proc, *itr;
5784 struct binder_device *binder_dev;
5785 struct binderfs_info *info;
5786 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5787 bool existing_pid = false;
5788
5789 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5790 current->group_leader->pid, current->pid);
5791
5792 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5793 if (proc == NULL)
5794 return -ENOMEM;
5795 spin_lock_init(&proc->inner_lock);
5796 spin_lock_init(&proc->outer_lock);
5797 get_task_struct(current->group_leader);
5798 proc->tsk = current->group_leader;
5799 proc->cred = get_cred(filp->f_cred);
5800 INIT_LIST_HEAD(&proc->todo);
5801 init_waitqueue_head(&proc->freeze_wait);
5802 proc->default_priority = task_nice(current);
5803 /* binderfs stashes devices in i_private */
5804 if (is_binderfs_device(nodp)) {
5805 binder_dev = nodp->i_private;
5806 info = nodp->i_sb->s_fs_info;
5807 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5808 } else {
5809 binder_dev = container_of(filp->private_data,
5810 struct binder_device, miscdev);
5811 }
5812 refcount_inc(&binder_dev->ref);
5813 proc->context = &binder_dev->context;
5814 binder_alloc_init(&proc->alloc);
5815
5816 binder_stats_created(BINDER_STAT_PROC);
5817 proc->pid = current->group_leader->pid;
5818 INIT_LIST_HEAD(&proc->delivered_death);
5819 INIT_LIST_HEAD(&proc->waiting_threads);
5820 filp->private_data = proc;
5821
5822 mutex_lock(&binder_procs_lock);
5823 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5824 if (itr->pid == proc->pid) {
5825 existing_pid = true;
5826 break;
5827 }
5828 }
5829 hlist_add_head(&proc->proc_node, &binder_procs);
5830 mutex_unlock(&binder_procs_lock);
5831
5832 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5833 char strbuf[11];
5834
5835 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5836 /*
5837 * proc debug entries are shared between contexts.
5838 * Only create for the first PID to avoid debugfs log spamming.
5839 * The printing code will print all contexts for a given PID
5840 * anyway, so this is not a problem.
5841 */
5842 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5843 binder_debugfs_dir_entry_proc,
5844 (void *)(unsigned long)proc->pid,
5845 &proc_fops);
5846 }
5847
5848 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5849 char strbuf[11];
5850 struct dentry *binderfs_entry;
5851
5852 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5853 /*
5854 * Similar to debugfs, the process specific log file is shared
5855 * between contexts. Only create for the first PID.
5856 * This is OK because, as with debugfs, the log file will
5857 * contain information on all contexts of a given PID.
5858 */
5859 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5860 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5861 if (!IS_ERR(binderfs_entry)) {
5862 proc->binderfs_entry = binderfs_entry;
5863 } else {
5864 int error;
5865
5866 error = PTR_ERR(binderfs_entry);
5867 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5868 strbuf, error);
5869 }
5870 }
5871
5872 return 0;
5873 }
5874
5875 static int binder_flush(struct file *filp, fl_owner_t id)
5876 {
5877 struct binder_proc *proc = filp->private_data;
5878
5879 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5880
5881 return 0;
5882 }
5883
5884 static void binder_deferred_flush(struct binder_proc *proc)
5885 {
5886 struct rb_node *n;
5887 int wake_count = 0;
5888
5889 binder_inner_proc_lock(proc);
5890 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5891 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5892
5893 thread->looper_need_return = true;
5894 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5895 wake_up_interruptible(&thread->wait);
5896 wake_count++;
5897 }
5898 }
5899 binder_inner_proc_unlock(proc);
5900
5901 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5902 "binder_flush: %d woke %d threads\n", proc->pid,
5903 wake_count);
5904 }
5905
5906 static int binder_release(struct inode *nodp, struct file *filp)
5907 {
5908 struct binder_proc *proc = filp->private_data;
5909
5910 debugfs_remove(proc->debugfs_entry);
5911
5912 if (proc->binderfs_entry) {
5913 binderfs_remove_file(proc->binderfs_entry);
5914 proc->binderfs_entry = NULL;
5915 }
5916
5917 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5918
5919 return 0;
5920 }
5921
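/*
 * binder_node_release() - retire a node whose owning proc is dying
 *
 * A node with no remaining references is freed on the spot. Otherwise
 * it is moved to the global binder_dead_nodes list and a
 * BINDER_WORK_DEAD_BINDER item is queued for every ref that registered
 * a death notification. Returns @refs plus the number of refs visited.
 */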
5922 static int binder_node_release(struct binder_node *node, int refs)
5923 {
5924 struct binder_ref *ref;
5925 int death = 0;
5926 struct binder_proc *proc = node->proc;
5927
5928 binder_release_work(proc, &node->async_todo);
5929
5930 binder_node_lock(node);
5931 binder_inner_proc_lock(proc);
5932 binder_dequeue_work_ilocked(&node->work);
5933 /*
5934 * The caller must have taken a temporary ref on the node.
5935 */
5936 BUG_ON(!node->tmp_refs);
5937 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5938 binder_inner_proc_unlock(proc);
5939 binder_node_unlock(node);
5940 binder_free_node(node);
5941
5942 return refs;
5943 }
5944
5945 node->proc = NULL;
5946 node->local_strong_refs = 0;
5947 node->local_weak_refs = 0;
5948 binder_inner_proc_unlock(proc);
5949
5950 spin_lock(&binder_dead_nodes_lock);
5951 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5952 spin_unlock(&binder_dead_nodes_lock);
5953
5954 hlist_for_each_entry(ref, &node->refs, node_entry) {
5955 refs++;
5956 /*
5957 * Need the node lock to synchronize
5958 * with new notification requests and the
5959 * inner lock to synchronize with queued
5960 * death notifications.
5961 */
5962 binder_inner_proc_lock(ref->proc);
5963 if (!ref->death) {
5964 binder_inner_proc_unlock(ref->proc);
5965 continue;
5966 }
5967
5968 death++;
5969
5970 BUG_ON(!list_empty(&ref->death->work.entry));
5971 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5972 binder_enqueue_work_ilocked(&ref->death->work,
5973 &ref->proc->todo);
5974 binder_wakeup_proc_ilocked(ref->proc);
5975 binder_inner_proc_unlock(ref->proc);
5976 }
5977
5978 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5979 "node %d now dead, refs %d, death %d\n",
5980 node->debug_id, refs, death);
5981 binder_node_unlock(node);
5982 binder_put_node(node);
5983
5984 return refs;
5985 }
5986
5987 static void binder_deferred_release(struct binder_proc *proc)
5988 {
5989 struct binder_context *context = proc->context;
5990 struct rb_node *n;
5991 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5992
5993 mutex_lock(&binder_procs_lock);
5994 hlist_del(&proc->proc_node);
5995 mutex_unlock(&binder_procs_lock);
5996
5997 mutex_lock(&context->context_mgr_node_lock);
5998 if (context->binder_context_mgr_node &&
5999 context->binder_context_mgr_node->proc == proc) {
6000 binder_debug(BINDER_DEBUG_DEAD_BINDER,
6001 "%s: %d context_mgr_node gone\n",
6002 __func__, proc->pid);
6003 context->binder_context_mgr_node = NULL;
6004 }
6005 mutex_unlock(&context->context_mgr_node_lock);
6006 binder_inner_proc_lock(proc);
6007 /*
6008 * Make sure proc stays alive after we
6009 * remove all the threads
6010 */
6011 proc->tmp_ref++;
6012
6013 proc->is_dead = true;
6014 proc->is_frozen = false;
6015 proc->sync_recv = false;
6016 proc->async_recv = false;
6017 threads = 0;
6018 active_transactions = 0;
6019 while ((n = rb_first(&proc->threads))) {
6020 struct binder_thread *thread;
6021
6022 thread = rb_entry(n, struct binder_thread, rb_node);
6023 binder_inner_proc_unlock(proc);
6024 threads++;
6025 active_transactions += binder_thread_release(proc, thread);
6026 binder_inner_proc_lock(proc);
6027 }
6028
6029 nodes = 0;
6030 incoming_refs = 0;
6031 while ((n = rb_first(&proc->nodes))) {
6032 struct binder_node *node;
6033
6034 node = rb_entry(n, struct binder_node, rb_node);
6035 nodes++;
6036 /*
6037 * take a temporary ref on the node before
6038 * calling binder_node_release() which will either
6039 * kfree() the node or call binder_put_node()
6040 */
6041 binder_inc_node_tmpref_ilocked(node);
6042 rb_erase(&node->rb_node, &proc->nodes);
6043 binder_inner_proc_unlock(proc);
6044 incoming_refs = binder_node_release(node, incoming_refs);
6045 binder_inner_proc_lock(proc);
6046 }
6047 binder_inner_proc_unlock(proc);
6048
6049 outgoing_refs = 0;
6050 binder_proc_lock(proc);
6051 while ((n = rb_first(&proc->refs_by_desc))) {
6052 struct binder_ref *ref;
6053
6054 ref = rb_entry(n, struct binder_ref, rb_node_desc);
6055 outgoing_refs++;
6056 binder_cleanup_ref_olocked(ref);
6057 binder_proc_unlock(proc);
6058 binder_free_ref(ref);
6059 binder_proc_lock(proc);
6060 }
6061 binder_proc_unlock(proc);
6062
6063 binder_release_work(proc, &proc->todo);
6064 binder_release_work(proc, &proc->delivered_death);
6065
6066 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6067 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6068 __func__, proc->pid, threads, nodes, incoming_refs,
6069 outgoing_refs, active_transactions);
6070
6071 binder_proc_dec_tmpref(proc);
6072 }
6073
6074 static void binder_deferred_func(struct work_struct *work)
6075 {
6076 struct binder_proc *proc;
6077
6078 int defer;
6079
6080 do {
6081 mutex_lock(&binder_deferred_lock);
6082 if (!hlist_empty(&binder_deferred_list)) {
6083 proc = hlist_entry(binder_deferred_list.first,
6084 struct binder_proc, deferred_work_node);
6085 hlist_del_init(&proc->deferred_work_node);
6086 defer = proc->deferred_work;
6087 proc->deferred_work = 0;
6088 } else {
6089 proc = NULL;
6090 defer = 0;
6091 }
6092 mutex_unlock(&binder_deferred_lock);
6093
6094 if (defer & BINDER_DEFERRED_FLUSH)
6095 binder_deferred_flush(proc);
6096
6097 if (defer & BINDER_DEFERRED_RELEASE)
6098 binder_deferred_release(proc); /* frees proc */
6099 } while (proc);
6100 }
6101 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6102
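/*
 * binder_defer_work() - queue deferred flush/release work for @proc
 *
 * Requests are OR-ed into proc->deferred_work; the proc is added to
 * binder_deferred_list (and the work item scheduled) only if it is not
 * already queued, so back-to-back callers coalesce into one pass of
 * binder_deferred_func().
 */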
6103 static void
6104 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6105 {
6106 mutex_lock(&binder_deferred_lock);
6107 proc->deferred_work |= defer;
6108 if (hlist_unhashed(&proc->deferred_work_node)) {
6109 hlist_add_head(&proc->deferred_work_node,
6110 &binder_deferred_list);
6111 schedule_work(&binder_deferred_work);
6112 }
6113 mutex_unlock(&binder_deferred_lock);
6114 }
6115
6116 static void print_binder_transaction_ilocked(struct seq_file *m,
6117 struct binder_proc *proc,
6118 const char *prefix,
6119 struct binder_transaction *t)
6120 {
6121 struct binder_proc *to_proc;
6122 struct binder_buffer *buffer = t->buffer;
6123 ktime_t current_time = ktime_get();
6124
6125 spin_lock(&t->lock);
6126 to_proc = t->to_proc;
6127 seq_printf(m,
6128 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
6129 prefix, t->debug_id, t,
6130 t->from_pid,
6131 t->from_tid,
6132 to_proc ? to_proc->pid : 0,
6133 t->to_thread ? t->to_thread->pid : 0,
6134 t->code, t->flags, t->priority, t->need_reply,
6135 ktime_ms_delta(current_time, t->start_time));
6136 spin_unlock(&t->lock);
6137
6138 if (proc != to_proc) {
6139 /*
6140 * Can only safely deref buffer if we are holding the
6141 * correct proc inner lock for this node
6142 */
6143 seq_puts(m, "\n");
6144 return;
6145 }
6146
6147 if (buffer == NULL) {
6148 seq_puts(m, " buffer free\n");
6149 return;
6150 }
6151 if (buffer->target_node)
6152 seq_printf(m, " node %d", buffer->target_node->debug_id);
6153 seq_printf(m, " size %zd:%zd data %pK\n",
6154 buffer->data_size, buffer->offsets_size,
6155 buffer->user_data);
6156 }
6157
6158 static void print_binder_work_ilocked(struct seq_file *m,
6159 struct binder_proc *proc,
6160 const char *prefix,
6161 const char *transaction_prefix,
6162 struct binder_work *w)
6163 {
6164 struct binder_node *node;
6165 struct binder_transaction *t;
6166
6167 switch (w->type) {
6168 case BINDER_WORK_TRANSACTION:
6169 t = container_of(w, struct binder_transaction, work);
6170 print_binder_transaction_ilocked(
6171 m, proc, transaction_prefix, t);
6172 break;
6173 case BINDER_WORK_RETURN_ERROR: {
6174 struct binder_error *e = container_of(
6175 w, struct binder_error, work);
6176
6177 seq_printf(m, "%stransaction error: %u\n",
6178 prefix, e->cmd);
6179 } break;
6180 case BINDER_WORK_TRANSACTION_COMPLETE:
6181 seq_printf(m, "%stransaction complete\n", prefix);
6182 break;
6183 case BINDER_WORK_NODE:
6184 node = container_of(w, struct binder_node, work);
6185 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6186 prefix, node->debug_id,
6187 (u64)node->ptr, (u64)node->cookie);
6188 break;
6189 case BINDER_WORK_DEAD_BINDER:
6190 seq_printf(m, "%shas dead binder\n", prefix);
6191 break;
6192 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6193 seq_printf(m, "%shas cleared dead binder\n", prefix);
6194 break;
6195 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6196 seq_printf(m, "%shas cleared death notification\n", prefix);
6197 break;
6198 default:
6199 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6200 break;
6201 }
6202 }
6203
6204 static void print_binder_thread_ilocked(struct seq_file *m,
6205 struct binder_thread *thread,
6206 int print_always)
6207 {
6208 struct binder_transaction *t;
6209 struct binder_work *w;
6210 size_t start_pos = m->count;
6211 size_t header_pos;
6212
6213 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
6214 thread->pid, thread->looper,
6215 thread->looper_need_return,
6216 atomic_read(&thread->tmp_ref));
6217 header_pos = m->count;
6218 t = thread->transaction_stack;
6219 while (t) {
6220 if (t->from == thread) {
6221 print_binder_transaction_ilocked(m, thread->proc,
6222 " outgoing transaction", t);
6223 t = t->from_parent;
6224 } else if (t->to_thread == thread) {
6225 print_binder_transaction_ilocked(m, thread->proc,
6226 " incoming transaction", t);
6227 t = t->to_parent;
6228 } else {
6229 print_binder_transaction_ilocked(m, thread->proc,
6230 " bad transaction", t);
6231 t = NULL;
6232 }
6233 }
6234 list_for_each_entry(w, &thread->todo, entry) {
6235 print_binder_work_ilocked(m, thread->proc, " ",
6236 " pending transaction", w);
6237 }
6238 if (!print_always && m->count == header_pos)
6239 m->count = start_pos;
6240 }
6241
6242 static void print_binder_node_nilocked(struct seq_file *m,
6243 struct binder_node *node)
6244 {
6245 struct binder_ref *ref;
6246 struct binder_work *w;
6247 int count;
6248
6249 count = 0;
6250 hlist_for_each_entry(ref, &node->refs, node_entry)
6251 count++;
6252
6253 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6254 node->debug_id, (u64)node->ptr, (u64)node->cookie,
6255 node->has_strong_ref, node->has_weak_ref,
6256 node->local_strong_refs, node->local_weak_refs,
6257 node->internal_strong_refs, count, node->tmp_refs);
6258 if (count) {
6259 seq_puts(m, " proc");
6260 hlist_for_each_entry(ref, &node->refs, node_entry)
6261 seq_printf(m, " %d", ref->proc->pid);
6262 }
6263 seq_puts(m, "\n");
6264 if (node->proc) {
6265 list_for_each_entry(w, &node->async_todo, entry)
6266 print_binder_work_ilocked(m, node->proc, " ",
6267 " pending async transaction", w);
6268 }
6269 }
6270
6271 static void print_binder_ref_olocked(struct seq_file *m,
6272 struct binder_ref *ref)
6273 {
6274 binder_node_lock(ref->node);
6275 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
6276 ref->data.debug_id, ref->data.desc,
6277 ref->node->proc ? "" : "dead ",
6278 ref->node->debug_id, ref->data.strong,
6279 ref->data.weak, ref->death);
6280 binder_node_unlock(ref->node);
6281 }
6282
6283 static void print_binder_proc(struct seq_file *m,
6284 struct binder_proc *proc, int print_all)
6285 {
6286 struct binder_work *w;
6287 struct rb_node *n;
6288 size_t start_pos = m->count;
6289 size_t header_pos;
6290 struct binder_node *last_node = NULL;
6291
6292 seq_printf(m, "proc %d\n", proc->pid);
6293 seq_printf(m, "context %s\n", proc->context->name);
6294 header_pos = m->count;
6295
6296 binder_inner_proc_lock(proc);
6297 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6298 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6299 rb_node), print_all);
6300
6301 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6302 struct binder_node *node = rb_entry(n, struct binder_node,
6303 rb_node);
6304 if (!print_all && !node->has_async_transaction)
6305 continue;
6306
6307 /*
6308 * take a temporary reference on the node so it
6309 * survives and isn't removed from the tree
6310 * while we print it.
6311 */
6312 binder_inc_node_tmpref_ilocked(node);
6313 /* Need to drop inner lock to take node lock */
6314 binder_inner_proc_unlock(proc);
6315 if (last_node)
6316 binder_put_node(last_node);
6317 binder_node_inner_lock(node);
6318 print_binder_node_nilocked(m, node);
6319 binder_node_inner_unlock(node);
6320 last_node = node;
6321 binder_inner_proc_lock(proc);
6322 }
6323 binder_inner_proc_unlock(proc);
6324 if (last_node)
6325 binder_put_node(last_node);
6326
6327 if (print_all) {
6328 binder_proc_lock(proc);
6329 for (n = rb_first(&proc->refs_by_desc);
6330 n != NULL;
6331 n = rb_next(n))
6332 print_binder_ref_olocked(m, rb_entry(n,
6333 struct binder_ref,
6334 rb_node_desc));
6335 binder_proc_unlock(proc);
6336 }
6337 binder_alloc_print_allocated(m, &proc->alloc);
6338 binder_inner_proc_lock(proc);
6339 list_for_each_entry(w, &proc->todo, entry)
6340 print_binder_work_ilocked(m, proc, " ",
6341 " pending transaction", w);
6342 list_for_each_entry(w, &proc->delivered_death, entry) {
6343 seq_puts(m, " has delivered dead binder\n");
6344 break;
6345 }
6346 binder_inner_proc_unlock(proc);
6347 if (!print_all && m->count == header_pos)
6348 m->count = start_pos;
6349 }
6350
6351 static const char * const binder_return_strings[] = {
6352 "BR_ERROR",
6353 "BR_OK",
6354 "BR_TRANSACTION",
6355 "BR_REPLY",
6356 "BR_ACQUIRE_RESULT",
6357 "BR_DEAD_REPLY",
6358 "BR_TRANSACTION_COMPLETE",
6359 "BR_INCREFS",
6360 "BR_ACQUIRE",
6361 "BR_RELEASE",
6362 "BR_DECREFS",
6363 "BR_ATTEMPT_ACQUIRE",
6364 "BR_NOOP",
6365 "BR_SPAWN_LOOPER",
6366 "BR_FINISHED",
6367 "BR_DEAD_BINDER",
6368 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6369 "BR_FAILED_REPLY",
6370 "BR_FROZEN_REPLY",
6371 "BR_ONEWAY_SPAM_SUSPECT",
6372 "BR_TRANSACTION_PENDING_FROZEN"
6373 };
6374
6375 static const char * const binder_command_strings[] = {
6376 "BC_TRANSACTION",
6377 "BC_REPLY",
6378 "BC_ACQUIRE_RESULT",
6379 "BC_FREE_BUFFER",
6380 "BC_INCREFS",
6381 "BC_ACQUIRE",
6382 "BC_RELEASE",
6383 "BC_DECREFS",
6384 "BC_INCREFS_DONE",
6385 "BC_ACQUIRE_DONE",
6386 "BC_ATTEMPT_ACQUIRE",
6387 "BC_REGISTER_LOOPER",
6388 "BC_ENTER_LOOPER",
6389 "BC_EXIT_LOOPER",
6390 "BC_REQUEST_DEATH_NOTIFICATION",
6391 "BC_CLEAR_DEATH_NOTIFICATION",
6392 "BC_DEAD_BINDER_DONE",
6393 "BC_TRANSACTION_SG",
6394 "BC_REPLY_SG",
6395 };
6396
6397 static const char * const binder_objstat_strings[] = {
6398 "proc",
6399 "thread",
6400 "node",
6401 "ref",
6402 "death",
6403 "transaction",
6404 "transaction_complete"
6405 };
6406
6407 static void print_binder_stats(struct seq_file *m, const char *prefix,
6408 struct binder_stats *stats)
6409 {
6410 int i;
6411
6412 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6413 ARRAY_SIZE(binder_command_strings));
6414 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6415 int temp = atomic_read(&stats->bc[i]);
6416
6417 if (temp)
6418 seq_printf(m, "%s%s: %d\n", prefix,
6419 binder_command_strings[i], temp);
6420 }
6421
6422 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6423 ARRAY_SIZE(binder_return_strings));
6424 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6425 int temp = atomic_read(&stats->br[i]);
6426
6427 if (temp)
6428 seq_printf(m, "%s%s: %d\n", prefix,
6429 binder_return_strings[i], temp);
6430 }
6431
6432 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6433 ARRAY_SIZE(binder_objstat_strings));
6434 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6435 ARRAY_SIZE(stats->obj_deleted));
6436 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6437 int created = atomic_read(&stats->obj_created[i]);
6438 int deleted = atomic_read(&stats->obj_deleted[i]);
6439
6440 if (created || deleted)
6441 seq_printf(m, "%s%s: active %d total %d\n",
6442 prefix,
6443 binder_objstat_strings[i],
6444 created - deleted,
6445 created);
6446 }
6447 }
6448
6449 static void print_binder_proc_stats(struct seq_file *m,
6450 struct binder_proc *proc)
6451 {
6452 struct binder_work *w;
6453 struct binder_thread *thread;
6454 struct rb_node *n;
6455 int count, strong, weak, ready_threads;
6456 size_t free_async_space =
6457 binder_alloc_get_free_async_space(&proc->alloc);
6458
6459 seq_printf(m, "proc %d\n", proc->pid);
6460 seq_printf(m, "context %s\n", proc->context->name);
6461 count = 0;
6462 ready_threads = 0;
6463 binder_inner_proc_lock(proc);
6464 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6465 count++;
6466
6467 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6468 ready_threads++;
6469
6470 seq_printf(m, " threads: %d\n", count);
6471 seq_printf(m, " requested threads: %d+%d/%d\n"
6472 " ready threads %d\n"
6473 " free async space %zd\n", proc->requested_threads,
6474 proc->requested_threads_started, proc->max_threads,
6475 ready_threads,
6476 free_async_space);
6477 count = 0;
6478 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6479 count++;
6480 binder_inner_proc_unlock(proc);
6481 seq_printf(m, " nodes: %d\n", count);
6482 count = 0;
6483 strong = 0;
6484 weak = 0;
6485 binder_proc_lock(proc);
6486 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6487 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6488 rb_node_desc);
6489 count++;
6490 strong += ref->data.strong;
6491 weak += ref->data.weak;
6492 }
6493 binder_proc_unlock(proc);
6494 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6495
6496 count = binder_alloc_get_allocated_count(&proc->alloc);
6497 seq_printf(m, " buffers: %d\n", count);
6498
6499 binder_alloc_print_pages(m, &proc->alloc);
6500
6501 count = 0;
6502 binder_inner_proc_lock(proc);
6503 list_for_each_entry(w, &proc->todo, entry) {
6504 if (w->type == BINDER_WORK_TRANSACTION)
6505 count++;
6506 }
6507 binder_inner_proc_unlock(proc);
6508 seq_printf(m, " pending transactions: %d\n", count);
6509
6510 print_binder_stats(m, " ", &proc->stats);
6511 }
6512
6513 static int state_show(struct seq_file *m, void *unused)
6514 {
6515 struct binder_proc *proc;
6516 struct binder_node *node;
6517 struct binder_node *last_node = NULL;
6518
6519 seq_puts(m, "binder state:\n");
6520
6521 spin_lock(&binder_dead_nodes_lock);
6522 if (!hlist_empty(&binder_dead_nodes))
6523 seq_puts(m, "dead nodes:\n");
6524 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6525 /*
6526 * take a temporary reference on the node so it
6527 * survives and isn't removed from the list
6528 * while we print it.
6529 */
6530 node->tmp_refs++;
6531 spin_unlock(&binder_dead_nodes_lock);
6532 if (last_node)
6533 binder_put_node(last_node);
6534 binder_node_lock(node);
6535 print_binder_node_nilocked(m, node);
6536 binder_node_unlock(node);
6537 last_node = node;
6538 spin_lock(&binder_dead_nodes_lock);
6539 }
6540 spin_unlock(&binder_dead_nodes_lock);
6541 if (last_node)
6542 binder_put_node(last_node);
6543
6544 mutex_lock(&binder_procs_lock);
6545 hlist_for_each_entry(proc, &binder_procs, proc_node)
6546 print_binder_proc(m, proc, 1);
6547 mutex_unlock(&binder_procs_lock);
6548
6549 return 0;
6550 }
6551
6552 static int stats_show(struct seq_file *m, void *unused)
6553 {
6554 struct binder_proc *proc;
6555
6556 seq_puts(m, "binder stats:\n");
6557
6558 print_binder_stats(m, "", &binder_stats);
6559
6560 mutex_lock(&binder_procs_lock);
6561 hlist_for_each_entry(proc, &binder_procs, proc_node)
6562 print_binder_proc_stats(m, proc);
6563 mutex_unlock(&binder_procs_lock);
6564
6565 return 0;
6566 }
6567
6568 static int transactions_show(struct seq_file *m, void *unused)
6569 {
6570 struct binder_proc *proc;
6571
6572 seq_puts(m, "binder transactions:\n");
6573 mutex_lock(&binder_procs_lock);
6574 hlist_for_each_entry(proc, &binder_procs, proc_node)
6575 print_binder_proc(m, proc, 0);
6576 mutex_unlock(&binder_procs_lock);
6577
6578 return 0;
6579 }
6580
6581 static int proc_show(struct seq_file *m, void *unused)
6582 {
6583 struct binder_proc *itr;
6584 int pid = (unsigned long)m->private;
6585
6586 mutex_lock(&binder_procs_lock);
6587 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6588 if (itr->pid == pid) {
6589 seq_puts(m, "binder proc state:\n");
6590 print_binder_proc(m, itr, 1);
6591 }
6592 }
6593 mutex_unlock(&binder_procs_lock);
6594
6595 return 0;
6596 }
6597
6598 static void print_binder_transaction_log_entry(struct seq_file *m,
6599 struct binder_transaction_log_entry *e)
6600 {
6601 int debug_id = READ_ONCE(e->debug_id_done);
6602 /*
6603 * read barrier to guarantee debug_id_done read before
6604 * we print the log values
6605 */
6606 smp_rmb();
6607 seq_printf(m,
6608 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6609 e->debug_id, (e->call_type == 2) ? "reply" :
6610 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6611 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6612 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6613 e->return_error, e->return_error_param,
6614 e->return_error_line);
6615 /*
6616 * read-barrier to guarantee read of debug_id_done after
6617 * done printing the fields of the entry
6618 */
6619 smp_rmb();
6620 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6621 "\n" : " (incomplete)\n");
6622 }
6623
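/*
 * transaction_log_show() - dump the transaction log ring buffer
 *
 * log->cur is a free-running index. Once the log has wrapped
 * (log->full), printing starts at the slot after the newest entry and
 * covers all ARRAY_SIZE(log->entry) slots; otherwise it starts at 0
 * and stops at the last written slot. Entries are read without a
 * lock, which is why each one is bracketed by debug_id_done checks in
 * print_binder_transaction_log_entry().
 */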
6624 static int transaction_log_show(struct seq_file *m, void *unused)
6625 {
6626 struct binder_transaction_log *log = m->private;
6627 unsigned int log_cur = atomic_read(&log->cur);
6628 unsigned int count;
6629 unsigned int cur;
6630 int i;
6631
6632 count = log_cur + 1;
6633 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6634 0 : count % ARRAY_SIZE(log->entry);
6635 if (count > ARRAY_SIZE(log->entry) || log->full)
6636 count = ARRAY_SIZE(log->entry);
6637 for (i = 0; i < count; i++) {
6638 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6639
6640 print_binder_transaction_log_entry(m, &log->entry[index]);
6641 }
6642 return 0;
6643 }
6644
6645 const struct file_operations binder_fops = {
6646 .owner = THIS_MODULE,
6647 .poll = binder_poll,
6648 .unlocked_ioctl = binder_ioctl,
6649 .compat_ioctl = compat_ptr_ioctl,
6650 .mmap = binder_mmap,
6651 .open = binder_open,
6652 .flush = binder_flush,
6653 .release = binder_release,
6654 };
6655
6656 DEFINE_SHOW_ATTRIBUTE(state);
6657 DEFINE_SHOW_ATTRIBUTE(stats);
6658 DEFINE_SHOW_ATTRIBUTE(transactions);
6659 DEFINE_SHOW_ATTRIBUTE(transaction_log);
6660
6661 const struct binder_debugfs_entry binder_debugfs_entries[] = {
6662 {
6663 .name = "state",
6664 .mode = 0444,
6665 .fops = &state_fops,
6666 .data = NULL,
6667 },
6668 {
6669 .name = "stats",
6670 .mode = 0444,
6671 .fops = &stats_fops,
6672 .data = NULL,
6673 },
6674 {
6675 .name = "transactions",
6676 .mode = 0444,
6677 .fops = &transactions_fops,
6678 .data = NULL,
6679 },
6680 {
6681 .name = "transaction_log",
6682 .mode = 0444,
6683 .fops = &transaction_log_fops,
6684 .data = &binder_transaction_log,
6685 },
6686 {
6687 .name = "failed_transaction_log",
6688 .mode = 0444,
6689 .fops = &transaction_log_fops,
6690 .data = &binder_transaction_log_failed,
6691 },
6692 {} /* terminator */
6693 };
6694
6695 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
6696 static void print_binder_transaction_brief_ilocked(
6697 struct seq_file *m,
6698 const char *prefix, struct binder_transaction *t,
6699 u64 timestamp)
6700 {
6701 struct binder_proc *to_proc = NULL;
6702 int from_pid = 0;
6703 int from_tid = 0;
6704 int to_pid = 0;
6705 u64 sec;
6706 u32 nsec;
6707
6708 spin_lock(&t->lock);
6709 to_proc = t->to_proc;
6710 from_pid = t->from ? (t->from->proc ? t->from->proc->pid : 0) : t->from_pid;
6711 from_tid = t->from ? t->from->pid : t->from_tid;
6712 to_pid = to_proc ? to_proc->pid : 0;
6713 sec = div_u64_rem((timestamp - t->timestamp), 1000000000, &nsec);
6714
6715 seq_printf(m,
6716 "%s%d:%d to %d:%d code %x wait:%llu.%u s\n",
6717 prefix,
6718 from_pid, from_tid,
6719 to_pid, t->to_thread ? t->to_thread->pid : 0,
6720 t->code,
6721 timestamp > t->timestamp ? sec : 0,
6722 timestamp > t->timestamp ? nsec : 0);
6723 spin_unlock(&t->lock);
6724 }
6725
6726 static void print_binder_work_transaction_nilocked(struct seq_file *m,
6727 const char *prefix, struct binder_work *w,
6728 u64 timestamp)
6729 {
6730 struct binder_transaction *t = NULL;
6731
6732 switch (w->type) {
6733 case BINDER_WORK_TRANSACTION:
6734 t = container_of(w, struct binder_transaction, work);
6735 print_binder_transaction_brief_ilocked(m, prefix, t, timestamp);
6736 break;
6737
6738 default:
6739 break;
6740 }
6741 }
6742
6743 static void print_binder_transaction_brief(struct seq_file *m,
6744 struct binder_proc *proc,
6745 u64 timestamp)
6746 {
6747 struct binder_work *w = NULL;
6748 struct rb_node *n = NULL;
6749 struct binder_node *last_node = NULL;
6750 size_t start_pos = m->count;
6751 size_t header_pos = m->count;
6752
6753 /* sync binder / not one way */
6754 binder_inner_proc_lock(proc);
6755 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6756 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6757 struct binder_transaction *t = thread->transaction_stack;
6758 while (t) {
6759 if (t->from == thread) {
6760 print_binder_transaction_brief_ilocked(m, "\t", t, timestamp);
6761 t = t->from_parent;
6762 } else if (t->to_thread == thread) {
6763 t = t->to_parent;
6764 } else {
6765 t = NULL;
6766 }
6767 }
6768 }
6769
6770 /* async binder / one way */
6771 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6772 struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
6773 /*
6774 * take a temporary reference on the node so it
6775 * survives and isn't removed from the tree
6776 * while we print it.
6777 */
6778 binder_inc_node_tmpref_ilocked(node);
6779 /* Need to drop inner lock to take node lock */
6780 binder_inner_proc_unlock(proc);
6781 if (last_node)
6782 binder_put_node(last_node);
6783 binder_node_inner_lock(node);
6784 list_for_each_entry(w, &node->async_todo, entry)
6785 print_binder_work_transaction_nilocked(m, "async\t", w, timestamp);
6786 binder_node_inner_unlock(node);
6787 last_node = node;
6788 binder_inner_proc_lock(proc);
6789 }
6790 binder_inner_proc_unlock(proc);
6791
6792 if (last_node)
6793 binder_put_node(last_node);
6794
6795 if (m->count == header_pos)
6796 m->count = start_pos;
6797 }
6798
6799 static void print_binder_proc_brief(struct seq_file *m,
6800 struct binder_proc *proc)
6801 {
6802 struct binder_thread *thread = NULL;
6803 int ready_threads = 0;
6804 size_t free_async_space = binder_alloc_get_free_async_space(&proc->alloc);
6805
6806 seq_printf(m, "%d\t", proc->pid);
6807 seq_printf(m, "%s\t", proc->context->name);
6808
6809 binder_inner_proc_lock(proc);
6810 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6811 ready_threads++;
6812
6813 seq_printf(m, "%d\t%d\t%d\t%d"
6814 "\t%zd\n", proc->requested_threads,
6815 proc->requested_threads_started, proc->max_threads,
6816 ready_threads,
6817 free_async_space);
6818 binder_inner_proc_unlock(proc);
6819 }
6820
6821 static int binder_transaction_proc_show(struct seq_file *m, void *unused)
6822 {
6823 struct binder_proc *proc = NULL;
6824 u64 now = 0;
6825
6826 mutex_lock(&binder_procs_lock);
6827 now = binder_clock();
6828 hlist_for_each_entry(proc, &binder_procs, proc_node)
6829 print_binder_transaction_brief(m, proc, now);
6830
6831 seq_printf(m, "\npid\tcontext\t\trequest\tstarted\tmax\tready\tfree_async_space\n");
6832 hlist_for_each_entry(proc, &binder_procs, proc_node)
6833 print_binder_proc_brief(m, proc);
6834 mutex_unlock(&binder_procs_lock);
6835
6836 return 0;
6837 }
6838 #endif
6839
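/*
 * init_binder_device() - register one binder misc device
 *
 * Each device carries its own binder_context (with its own context
 * manager), so distinct device names behave as fully isolated binder
 * namespaces.
 */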
6840 static int __init init_binder_device(const char *name)
6841 {
6842 int ret;
6843 struct binder_device *binder_device;
6844
6845 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6846 if (!binder_device)
6847 return -ENOMEM;
6848
6849 binder_device->miscdev.fops = &binder_fops;
6850 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6851 binder_device->miscdev.name = name;
6852
6853 refcount_set(&binder_device->ref, 1);
6854 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6855 binder_device->context.name = name;
6856 mutex_init(&binder_device->context.context_mgr_node_lock);
6857
6858 ret = misc_register(&binder_device->miscdev);
6859 if (ret < 0) {
6860 kfree(binder_device);
6861 return ret;
6862 }
6863
6864 hlist_add_head(&binder_device->hlist, &binder_devices);
6865
6866 return ret;
6867 }
6868
6869 static int __init binder_init(void)
6870 {
6871 int ret;
6872 char *device_name, *device_tmp;
6873 struct binder_device *device;
6874 struct hlist_node *tmp;
6875 char *device_names = NULL;
6876 const struct binder_debugfs_entry *db_entry;
6877
6878 ret = binder_alloc_shrinker_init();
6879 if (ret)
6880 return ret;
6881
6882 atomic_set(&binder_transaction_log.cur, ~0U);
6883 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6884
6885 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6886
6887 binder_for_each_debugfs_entry(db_entry)
6888 debugfs_create_file(db_entry->name,
6889 db_entry->mode,
6890 binder_debugfs_dir_entry_root,
6891 db_entry->data,
6892 db_entry->fops);
6893
6894 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6895 binder_debugfs_dir_entry_root);
6896
6897 if (binder_debugfs_dir_entry_root) {
6898 #ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
6899 proc_create_data("transaction_proc",
6900 0444,
6901 NULL,
6902 &binder_transaction_proc_proc_ops,
6903 NULL);
6904 #endif
6905 }
6906
6907 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6908 strcmp(binder_devices_param, "") != 0) {
6909 /*
6910 * Copy the module parameter string, because we don't want to
6911 * tokenize it in-place.
6912 */
6913 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6914 if (!device_names) {
6915 ret = -ENOMEM;
6916 goto err_alloc_device_names_failed;
6917 }
6918
6919 device_tmp = device_names;
6920 while ((device_name = strsep(&device_tmp, ","))) {
6921 ret = init_binder_device(device_name);
6922 if (ret)
6923 goto err_init_binder_device_failed;
6924 }
6925 }
6926
6927 ret = init_binderfs();
6928 if (ret)
6929 goto err_init_binder_device_failed;
6930
6931 return ret;
6932
6933 err_init_binder_device_failed:
6934 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6935 misc_deregister(&device->miscdev);
6936 hlist_del(&device->hlist);
6937 kfree(device);
6938 }
6939
6940 kfree(device_names);
6941
6942 err_alloc_device_names_failed:
6943 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6944 binder_alloc_shrinker_exit();
6945
6946 return ret;
6947 }
6948
6949 device_initcall(binder_init);
6950
6951 #define CREATE_TRACE_POINTS
6952 #include "binder_trace.h"
6953
6954 MODULE_LICENSE("GPL v2");
6955