1// SPDX-License-Identifier: GPL-2.0-only
2/* binder.c
3 *
4 * Android IPC Subsystem
5 *
6 * Copyright (C) 2007-2008 Google, Inc.
7 */
8
9/*
10 * Locking overview
11 *
12 * There are 3 main spinlocks which must be acquired in the
13 * order shown:
14 *
15 * 1) proc->outer_lock : protects binder_ref
16 *    binder_proc_lock() and binder_proc_unlock() are
17 *    used to acq/rel.
18 * 2) node->lock : protects most fields of binder_node.
19 *    binder_node_lock() and binder_node_unlock() are
20 *    used to acq/rel
21 * 3) proc->inner_lock : protects the thread and node lists
22 *    (proc->threads, proc->waiting_threads, proc->nodes)
23 *    and all todo lists associated with the binder_proc
24 *    (proc->todo, thread->todo, proc->delivered_death and
25 *    node->async_todo), as well as thread->transaction_stack
26 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
27 *    are used to acq/rel
28 *
29 * Any lock under procA must never be nested under any lock at the same
30 * level or below on procB.
31 *
32 * Functions that require a lock held on entry indicate which lock
33 * in the suffix of the function name:
34 *
35 * foo_olocked() : requires node->outer_lock
36 * foo_nlocked() : requires node->lock
37 * foo_ilocked() : requires proc->inner_lock
38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
39 * foo_nilocked(): requires node->lock and proc->inner_lock
40 * ...
41 */
42
43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
45#include <linux/fdtable.h>
46#include <linux/file.h>
47#include <linux/freezer.h>
48#include <linux/fs.h>
49#include <linux/list.h>
50#include <linux/miscdevice.h>
51#include <linux/module.h>
52#include <linux/mutex.h>
53#include <linux/nsproxy.h>
54#include <linux/poll.h>
55#include <linux/debugfs.h>
56#include <linux/rbtree.h>
57#include <linux/sched/signal.h>
58#include <linux/sched/mm.h>
59#include <linux/seq_file.h>
60#include <linux/string.h>
61#include <linux/uaccess.h>
62#include <linux/pid_namespace.h>
63#include <linux/security.h>
64#include <linux/spinlock.h>
65#include <linux/ratelimit.h>
66#include <linux/syscalls.h>
67#include <linux/task_work.h>
68#include <linux/sizes.h>
69#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
70#include <linux/trace_clock.h>
71#include <linux/proc_fs.h>
72#endif
73
74#include <uapi/linux/android/binder.h>
75#include <uapi/linux/android/binderfs.h>
76
77#include <asm/cacheflush.h>
78
79#include "binder_alloc.h"
80#include "binder_internal.h"
81#include "binder_trace.h"
82
83static HLIST_HEAD(binder_deferred_list);
84static DEFINE_MUTEX(binder_deferred_lock);
85
86static HLIST_HEAD(binder_devices);
87static HLIST_HEAD(binder_procs);
88static DEFINE_MUTEX(binder_procs_lock);
89
90static HLIST_HEAD(binder_dead_nodes);
91static DEFINE_SPINLOCK(binder_dead_nodes_lock);
92
93static struct dentry *binder_debugfs_dir_entry_root;
94static struct dentry *binder_debugfs_dir_entry_proc;
95static atomic_t binder_last_id;
96
97static int proc_show(struct seq_file *m, void *unused);
98DEFINE_SHOW_ATTRIBUTE(proc);
99
100#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
101static int binder_transaction_proc_show(struct seq_file *m, void *unused);
102DEFINE_PROC_SHOW_ATTRIBUTE(binder_transaction_proc);
103#endif
104
105#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
106
107#ifdef CONFIG_ACCESS_TOKENID
108#define ENABLE_ACCESS_TOKENID 1
109#else
110#define ENABLE_ACCESS_TOKENID 0
111#endif /* CONFIG_ACCESS_TOKENID */
112
113#ifdef CONFIG_BINDER_SENDER_INFO
114#define ENABLE_BINDER_SENDER_INFO 1
115#else
116#define ENABLE_BINDER_SENDER_INFO 0
117#endif /* CONFIG_BINDER_SENDER_INFO */
118
119#define ACCESS_TOKENID_FEATURE_VALUE (ENABLE_ACCESS_TOKENID << 0)
120#define BINDER_SENDER_INFO_FEATURE_VALUE (ENABLE_BINDER_SENDER_INFO << 2)
121
122#define BINDER_CURRENT_FEATURE_SET (ACCESS_TOKENID_FEATURE_VALUE | BINDER_SENDER_INFO_FEATURE_VALUE)
123
124enum {
125	BINDER_DEBUG_USER_ERROR             = 1U << 0,
126	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
127	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
128	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
129	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
130	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
131	BINDER_DEBUG_READ_WRITE             = 1U << 6,
132	BINDER_DEBUG_USER_REFS              = 1U << 7,
133	BINDER_DEBUG_THREADS                = 1U << 8,
134	BINDER_DEBUG_TRANSACTION            = 1U << 9,
135	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
136	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
137	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
138	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
139	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
140};
141static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
142	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
143module_param_named(debug_mask, binder_debug_mask, uint, 0644);
144
145char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
146module_param_named(devices, binder_devices_param, charp, 0444);
147
148static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
149static int binder_stop_on_user_error;
150
151static int binder_set_stop_on_user_error(const char *val,
152					 const struct kernel_param *kp)
153{
154	int ret;
155
156	ret = param_set_int(val, kp);
157	if (binder_stop_on_user_error < 2)
158		wake_up(&binder_user_error_wait);
159	return ret;
160}
161module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
162	param_get_int, &binder_stop_on_user_error, 0644);
163
164#define binder_debug(mask, x...) \
165	do { \
166		if (binder_debug_mask & mask) \
167			pr_info_ratelimited(x); \
168	} while (0)
169
170#define binder_user_error(x...) \
171	do { \
172		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
173			pr_info_ratelimited(x); \
174		if (binder_stop_on_user_error) \
175			binder_stop_on_user_error = 2; \
176	} while (0)
177
178#define to_flat_binder_object(hdr) \
179	container_of(hdr, struct flat_binder_object, hdr)
180
181#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
182
183#define to_binder_buffer_object(hdr) \
184	container_of(hdr, struct binder_buffer_object, hdr)
185
186#define to_binder_fd_array_object(hdr) \
187	container_of(hdr, struct binder_fd_array_object, hdr)
188
189enum binder_stat_types {
190	BINDER_STAT_PROC,
191	BINDER_STAT_THREAD,
192	BINDER_STAT_NODE,
193	BINDER_STAT_REF,
194	BINDER_STAT_DEATH,
195	BINDER_STAT_TRANSACTION,
196	BINDER_STAT_TRANSACTION_COMPLETE,
197	BINDER_STAT_COUNT
198};
199
200struct binder_stats {
201	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
202	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
203	atomic_t obj_created[BINDER_STAT_COUNT];
204	atomic_t obj_deleted[BINDER_STAT_COUNT];
205};
206
207static struct binder_stats binder_stats;
208
209static inline void binder_stats_deleted(enum binder_stat_types type)
210{
211	atomic_inc(&binder_stats.obj_deleted[type]);
212}
213
214static inline void binder_stats_created(enum binder_stat_types type)
215{
216	atomic_inc(&binder_stats.obj_created[type]);
217}
218
219struct binder_transaction_log binder_transaction_log;
220struct binder_transaction_log binder_transaction_log_failed;
221
222static struct binder_transaction_log_entry *binder_transaction_log_add(
223	struct binder_transaction_log *log)
224{
225	struct binder_transaction_log_entry *e;
226	unsigned int cur = atomic_inc_return(&log->cur);
227
228	if (cur >= ARRAY_SIZE(log->entry))
229		log->full = true;
230	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
231	WRITE_ONCE(e->debug_id_done, 0);
232	/*
233	 * write-barrier to synchronize access to e->debug_id_done.
234	 * We make sure the initialized 0 value is seen before
235	 * memset() other fields are zeroed by memset.
236	 */
237	smp_wmb();
238	memset(e, 0, sizeof(*e));
239	return e;
240}
241
242/**
243 * struct binder_work - work enqueued on a worklist
244 * @entry:             node enqueued on list
245 * @type:              type of work to be performed
246 *
247 * There are separate work lists for proc, thread, and node (async).
248 */
249struct binder_work {
250	struct list_head entry;
251
252	enum binder_work_type {
253		BINDER_WORK_TRANSACTION = 1,
254		BINDER_WORK_TRANSACTION_COMPLETE,
255		BINDER_WORK_RETURN_ERROR,
256		BINDER_WORK_NODE,
257		BINDER_WORK_DEAD_BINDER,
258		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
259		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
260	} type;
261};
262
263struct binder_error {
264	struct binder_work work;
265	uint32_t cmd;
266};
267
268/**
269 * struct binder_node - binder node bookkeeping
270 * @debug_id:             unique ID for debugging
271 *                        (invariant after initialized)
272 * @lock:                 lock for node fields
273 * @work:                 worklist element for node work
274 *                        (protected by @proc->inner_lock)
275 * @rb_node:              element for proc->nodes tree
276 *                        (protected by @proc->inner_lock)
277 * @dead_node:            element for binder_dead_nodes list
278 *                        (protected by binder_dead_nodes_lock)
279 * @proc:                 binder_proc that owns this node
280 *                        (invariant after initialized)
281 * @refs:                 list of references on this node
282 *                        (protected by @lock)
283 * @internal_strong_refs: used to take strong references when
284 *                        initiating a transaction
285 *                        (protected by @proc->inner_lock if @proc
286 *                        and by @lock)
287 * @local_weak_refs:      weak user refs from local process
288 *                        (protected by @proc->inner_lock if @proc
289 *                        and by @lock)
290 * @local_strong_refs:    strong user refs from local process
291 *                        (protected by @proc->inner_lock if @proc
292 *                        and by @lock)
293 * @tmp_refs:             temporary kernel refs
294 *                        (protected by @proc->inner_lock while @proc
295 *                        is valid, and by binder_dead_nodes_lock
296 *                        if @proc is NULL. During inc/dec and node release
297 *                        it is also protected by @lock to provide safety
298 *                        as the node dies and @proc becomes NULL)
299 * @ptr:                  userspace pointer for node
300 *                        (invariant, no lock needed)
301 * @cookie:               userspace cookie for node
302 *                        (invariant, no lock needed)
303 * @has_strong_ref:       userspace notified of strong ref
304 *                        (protected by @proc->inner_lock if @proc
305 *                        and by @lock)
306 * @pending_strong_ref:   userspace has acked notification of strong ref
307 *                        (protected by @proc->inner_lock if @proc
308 *                        and by @lock)
309 * @has_weak_ref:         userspace notified of weak ref
310 *                        (protected by @proc->inner_lock if @proc
311 *                        and by @lock)
312 * @pending_weak_ref:     userspace has acked notification of weak ref
313 *                        (protected by @proc->inner_lock if @proc
314 *                        and by @lock)
315 * @has_async_transaction: async transaction to node in progress
316 *                        (protected by @lock)
317 * @accept_fds:           file descriptor operations supported for node
318 *                        (invariant after initialized)
319 * @min_priority:         minimum scheduling priority
320 *                        (invariant after initialized)
321 * @txn_security_ctx:     require sender's security context
322 *                        (invariant after initialized)
323 * @async_todo:           list of async work items
324 *                        (protected by @proc->inner_lock)
325 *
326 * Bookkeeping structure for binder nodes.
327 */
328struct binder_node {
329	int debug_id;
330	spinlock_t lock;
331	struct binder_work work;
332	union {
333		struct rb_node rb_node;
334		struct hlist_node dead_node;
335	};
336	struct binder_proc *proc;
337	struct hlist_head refs;
338	int internal_strong_refs;
339	int local_weak_refs;
340	int local_strong_refs;
341	int tmp_refs;
342	binder_uintptr_t ptr;
343	binder_uintptr_t cookie;
344	struct {
345		/*
346		 * bitfield elements protected by
347		 * proc inner_lock
348		 */
349		u8 has_strong_ref:1;
350		u8 pending_strong_ref:1;
351		u8 has_weak_ref:1;
352		u8 pending_weak_ref:1;
353	};
354	struct {
355		/*
356		 * invariant after initialization
357		 */
358		u8 accept_fds:1;
359		u8 txn_security_ctx:1;
360		u8 min_priority;
361	};
362	bool has_async_transaction;
363	struct list_head async_todo;
364};
365
366struct binder_ref_death {
367	/**
368	 * @work: worklist element for death notifications
369	 *        (protected by inner_lock of the proc that
370	 *        this ref belongs to)
371	 */
372	struct binder_work work;
373	binder_uintptr_t cookie;
374};
375
376/**
377 * struct binder_ref_data - binder_ref counts and id
378 * @debug_id:        unique ID for the ref
379 * @desc:            unique userspace handle for ref
380 * @strong:          strong ref count (debugging only if not locked)
381 * @weak:            weak ref count (debugging only if not locked)
382 *
383 * Structure to hold ref count and ref id information. Since
384 * the actual ref can only be accessed with a lock, this structure
385 * is used to return information about the ref to callers of
386 * ref inc/dec functions.
387 */
388struct binder_ref_data {
389	int debug_id;
390	uint32_t desc;
391	int strong;
392	int weak;
393};
394
395/**
396 * struct binder_ref - struct to track references on nodes
397 * @data:        binder_ref_data containing id, handle, and current refcounts
398 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
399 * @rb_node_node: node for lookup by @node in proc's rb_tree
400 * @node_entry:  list entry for node->refs list in target node
401 *               (protected by @node->lock)
402 * @proc:        binder_proc containing ref
403 * @node:        binder_node of target node. When cleaning up a
404 *               ref for deletion in binder_cleanup_ref, a non-NULL
405 *               @node indicates the node must be freed
406 * @death:       pointer to death notification (ref_death) if requested
407 *               (protected by @node->lock)
408 *
409 * Structure to track references from procA to target node (on procB). This
410 * structure is unsafe to access without holding @proc->outer_lock.
411 */
412struct binder_ref {
413	/* Lookups needed: */
414	/*   node + proc => ref (transaction) */
415	/*   desc + proc => ref (transaction, inc/dec ref) */
416	/*   node => refs + procs (proc exit) */
417	struct binder_ref_data data;
418	struct rb_node rb_node_desc;
419	struct rb_node rb_node_node;
420	struct hlist_node node_entry;
421	struct binder_proc *proc;
422	struct binder_node *node;
423	struct binder_ref_death *death;
424};
425
426enum binder_deferred_state {
427	BINDER_DEFERRED_FLUSH        = 0x01,
428	BINDER_DEFERRED_RELEASE      = 0x02,
429};
430
431/**
432 * struct binder_proc - binder process bookkeeping
433 * @proc_node:            element for binder_procs list
434 * @threads:              rbtree of binder_threads in this proc
435 *                        (protected by @inner_lock)
436 * @nodes:                rbtree of binder nodes associated with
437 *                        this proc ordered by node->ptr
438 *                        (protected by @inner_lock)
439 * @refs_by_desc:         rbtree of refs ordered by ref->desc
440 *                        (protected by @outer_lock)
441 * @refs_by_node:         rbtree of refs ordered by ref->node
442 *                        (protected by @outer_lock)
443 * @waiting_threads:      threads currently waiting for proc work
444 *                        (protected by @inner_lock)
445 * @pid                   PID of group_leader of process
446 *                        (invariant after initialized)
447 * @tsk                   task_struct for group_leader of process
448 *                        (invariant after initialized)
449 * @cred                  struct cred associated with the `struct file`
450 *                        in binder_open()
451 *                        (invariant after initialized)
452 * @deferred_work_node:   element for binder_deferred_list
453 *                        (protected by binder_deferred_lock)
454 * @deferred_work:        bitmap of deferred work to perform
455 *                        (protected by binder_deferred_lock)
456 * @is_dead:              process is dead and awaiting free
457 *                        when outstanding transactions are cleaned up
458 *                        (protected by @inner_lock)
459 * @todo:                 list of work for this process
460 *                        (protected by @inner_lock)
461 * @stats:                per-process binder statistics
462 *                        (atomics, no lock needed)
463 * @delivered_death:      list of delivered death notification
464 *                        (protected by @inner_lock)
465 * @max_threads:          cap on number of binder threads
466 *                        (protected by @inner_lock)
467 * @requested_threads:    number of binder threads requested but not
468 *                        yet started. In current implementation, can
469 *                        only be 0 or 1.
470 *                        (protected by @inner_lock)
471 * @requested_threads_started: number binder threads started
472 *                        (protected by @inner_lock)
473 * @tmp_ref:              temporary reference to indicate proc is in use
474 *                        (protected by @inner_lock)
475 * @default_priority:     default scheduler priority
476 *                        (invariant after initialized)
477 * @debugfs_entry:        debugfs node
478 * @alloc:                binder allocator bookkeeping
479 * @context:              binder_context for this proc
480 *                        (invariant after initialized)
481 * @inner_lock:           can nest under outer_lock and/or node lock
482 * @outer_lock:           no nesting under innor or node lock
483 *                        Lock order: 1) outer, 2) node, 3) inner
484 * @binderfs_entry:       process-specific binderfs log file
485 *
486 * Bookkeeping structure for binder processes
487 */
488struct binder_proc {
489	struct hlist_node proc_node;
490	struct rb_root threads;
491	struct rb_root nodes;
492	struct rb_root refs_by_desc;
493	struct rb_root refs_by_node;
494	struct list_head waiting_threads;
495	int pid;
496	struct task_struct *tsk;
497	const struct cred *cred;
498	struct hlist_node deferred_work_node;
499	int deferred_work;
500	bool is_dead;
501
502	struct list_head todo;
503	struct binder_stats stats;
504	struct list_head delivered_death;
505	int max_threads;
506	int requested_threads;
507	int requested_threads_started;
508	int tmp_ref;
509	long default_priority;
510	struct dentry *debugfs_entry;
511	struct binder_alloc alloc;
512	struct binder_context *context;
513	spinlock_t inner_lock;
514	spinlock_t outer_lock;
515	struct dentry *binderfs_entry;
516};
517
518enum {
519	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
520	BINDER_LOOPER_STATE_ENTERED     = 0x02,
521	BINDER_LOOPER_STATE_EXITED      = 0x04,
522	BINDER_LOOPER_STATE_INVALID     = 0x08,
523	BINDER_LOOPER_STATE_WAITING     = 0x10,
524	BINDER_LOOPER_STATE_POLL        = 0x20,
525};
526
527/**
528 * struct binder_thread - binder thread bookkeeping
529 * @proc:                 binder process for this thread
530 *                        (invariant after initialization)
531 * @rb_node:              element for proc->threads rbtree
532 *                        (protected by @proc->inner_lock)
533 * @waiting_thread_node:  element for @proc->waiting_threads list
534 *                        (protected by @proc->inner_lock)
535 * @pid:                  PID for this thread
536 *                        (invariant after initialization)
537 * @looper:               bitmap of looping state
538 *                        (only accessed by this thread)
539 * @looper_needs_return:  looping thread needs to exit driver
540 *                        (no lock needed)
541 * @transaction_stack:    stack of in-progress transactions for this thread
542 *                        (protected by @proc->inner_lock)
543 * @todo:                 list of work to do for this thread
544 *                        (protected by @proc->inner_lock)
545 * @process_todo:         whether work in @todo should be processed
546 *                        (protected by @proc->inner_lock)
547 * @return_error:         transaction errors reported by this thread
548 *                        (only accessed by this thread)
549 * @reply_error:          transaction errors reported by target thread
550 *                        (protected by @proc->inner_lock)
551 * @wait:                 wait queue for thread work
552 * @stats:                per-thread statistics
553 *                        (atomics, no lock needed)
554 * @tmp_ref:              temporary reference to indicate thread is in use
555 *                        (atomic since @proc->inner_lock cannot
556 *                        always be acquired)
557 * @is_dead:              thread is dead and awaiting free
558 *                        when outstanding transactions are cleaned up
559 *                        (protected by @proc->inner_lock)
560 *
561 * Bookkeeping structure for binder threads.
562 */
563struct binder_thread {
564	struct binder_proc *proc;
565	struct rb_node rb_node;
566	struct list_head waiting_thread_node;
567	int pid;
568	int looper;              /* only modified by this thread */
569	bool looper_need_return; /* can be written by other thread */
570	struct binder_transaction *transaction_stack;
571	struct list_head todo;
572	bool process_todo;
573	struct binder_error return_error;
574	struct binder_error reply_error;
575	wait_queue_head_t wait;
576	struct binder_stats stats;
577	atomic_t tmp_ref;
578	bool is_dead;
579#ifdef CONFIG_ACCESS_TOKENID
580	struct access_token tokens;
581#endif /* CONFIG_ACCESS_TOKENID */
582#ifdef CONFIG_BINDER_SENDER_INFO
583	__u64 sender_pid_nr;
584#endif /* CONFIG_BINDER_SENDER_INFO */
585};
586
587/**
588 * struct binder_txn_fd_fixup - transaction fd fixup list element
589 * @fixup_entry:          list entry
590 * @file:                 struct file to be associated with new fd
591 * @offset:               offset in buffer data to this fixup
592 *
593 * List element for fd fixups in a transaction. Since file
594 * descriptors need to be allocated in the context of the
595 * target process, we pass each fd to be processed in this
596 * struct.
597 */
598struct binder_txn_fd_fixup {
599	struct list_head fixup_entry;
600	struct file *file;
601	size_t offset;
602};
603
604struct binder_transaction {
605	int debug_id;
606	struct binder_work work;
607	struct binder_thread *from;
608#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
609	int async_from_pid;
610	int async_from_tid;
611	u64 timestamp;
612#endif
613	struct binder_transaction *from_parent;
614	struct binder_proc *to_proc;
615	struct binder_thread *to_thread;
616	struct binder_transaction *to_parent;
617	unsigned need_reply:1;
618	/* unsigned is_dead:1; */	/* not used at the moment */
619
620	struct binder_buffer *buffer;
621	unsigned int	code;
622	unsigned int	flags;
623	long	priority;
624	long	saved_priority;
625	kuid_t	sender_euid;
626	struct list_head fd_fixups;
627	binder_uintptr_t security_ctx;
628	/**
629	 * @lock:  protects @from, @to_proc, and @to_thread
630	 *
631	 * @from, @to_proc, and @to_thread can be set to NULL
632	 * during thread teardown
633	 */
634	spinlock_t lock;
635#ifdef CONFIG_ACCESS_TOKENID
636	u64 sender_tokenid;
637	u64 first_tokenid;
638#endif /* CONFIG_ACCESS_TOKENID */
639};
640
641/**
642 * struct binder_object - union of flat binder object types
643 * @hdr:   generic object header
644 * @fbo:   binder object (nodes and refs)
645 * @fdo:   file descriptor object
646 * @bbo:   binder buffer pointer
647 * @fdao:  file descriptor array
648 *
649 * Used for type-independent object copies
650 */
651struct binder_object {
652	union {
653		struct binder_object_header hdr;
654		struct flat_binder_object fbo;
655		struct binder_fd_object fdo;
656		struct binder_buffer_object bbo;
657		struct binder_fd_array_object fdao;
658	};
659};
660
661/**
662 * binder_proc_lock() - Acquire outer lock for given binder_proc
663 * @proc:         struct binder_proc to acquire
664 *
665 * Acquires proc->outer_lock. Used to protect binder_ref
666 * structures associated with the given proc.
667 */
668#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
669static void
670_binder_proc_lock(struct binder_proc *proc, int line)
671	__acquires(&proc->outer_lock)
672{
673	binder_debug(BINDER_DEBUG_SPINLOCKS,
674		     "%s: line=%d\n", __func__, line);
675	spin_lock(&proc->outer_lock);
676}
677
678/**
679 * binder_proc_unlock() - Release spinlock for given binder_proc
680 * @proc:         struct binder_proc to acquire
681 *
682 * Release lock acquired via binder_proc_lock()
683 */
684#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
685static void
686_binder_proc_unlock(struct binder_proc *proc, int line)
687	__releases(&proc->outer_lock)
688{
689	binder_debug(BINDER_DEBUG_SPINLOCKS,
690		     "%s: line=%d\n", __func__, line);
691	spin_unlock(&proc->outer_lock);
692}
693
694/**
695 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
696 * @proc:         struct binder_proc to acquire
697 *
698 * Acquires proc->inner_lock. Used to protect todo lists
699 */
700#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
701static void
702_binder_inner_proc_lock(struct binder_proc *proc, int line)
703	__acquires(&proc->inner_lock)
704{
705	binder_debug(BINDER_DEBUG_SPINLOCKS,
706		     "%s: line=%d\n", __func__, line);
707	spin_lock(&proc->inner_lock);
708}
709
710/**
711 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
712 * @proc:         struct binder_proc to acquire
713 *
714 * Release lock acquired via binder_inner_proc_lock()
715 */
716#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
717static void
718_binder_inner_proc_unlock(struct binder_proc *proc, int line)
719	__releases(&proc->inner_lock)
720{
721	binder_debug(BINDER_DEBUG_SPINLOCKS,
722		     "%s: line=%d\n", __func__, line);
723	spin_unlock(&proc->inner_lock);
724}
725
726/**
727 * binder_node_lock() - Acquire spinlock for given binder_node
728 * @node:         struct binder_node to acquire
729 *
730 * Acquires node->lock. Used to protect binder_node fields
731 */
732#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
733static void
734_binder_node_lock(struct binder_node *node, int line)
735	__acquires(&node->lock)
736{
737	binder_debug(BINDER_DEBUG_SPINLOCKS,
738		     "%s: line=%d\n", __func__, line);
739	spin_lock(&node->lock);
740}
741
742/**
743 * binder_node_unlock() - Release spinlock for given binder_proc
744 * @node:         struct binder_node to acquire
745 *
746 * Release lock acquired via binder_node_lock()
747 */
748#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
749static void
750_binder_node_unlock(struct binder_node *node, int line)
751	__releases(&node->lock)
752{
753	binder_debug(BINDER_DEBUG_SPINLOCKS,
754		     "%s: line=%d\n", __func__, line);
755	spin_unlock(&node->lock);
756}
757
758/**
759 * binder_node_inner_lock() - Acquire node and inner locks
760 * @node:         struct binder_node to acquire
761 *
762 * Acquires node->lock. If node->proc also acquires
763 * proc->inner_lock. Used to protect binder_node fields
764 */
765#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
766static void
767_binder_node_inner_lock(struct binder_node *node, int line)
768	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
769{
770	binder_debug(BINDER_DEBUG_SPINLOCKS,
771		     "%s: line=%d\n", __func__, line);
772	spin_lock(&node->lock);
773	if (node->proc)
774		binder_inner_proc_lock(node->proc);
775	else
776		/* annotation for sparse */
777		__acquire(&node->proc->inner_lock);
778}
779
780/**
781 * binder_node_unlock() - Release node and inner locks
782 * @node:         struct binder_node to acquire
783 *
784 * Release lock acquired via binder_node_lock()
785 */
786#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
787static void
788_binder_node_inner_unlock(struct binder_node *node, int line)
789	__releases(&node->lock) __releases(&node->proc->inner_lock)
790{
791	struct binder_proc *proc = node->proc;
792
793	binder_debug(BINDER_DEBUG_SPINLOCKS,
794		     "%s: line=%d\n", __func__, line);
795	if (proc)
796		binder_inner_proc_unlock(proc);
797	else
798		/* annotation for sparse */
799		__release(&node->proc->inner_lock);
800	spin_unlock(&node->lock);
801}
802
803static bool binder_worklist_empty_ilocked(struct list_head *list)
804{
805	return list_empty(list);
806}
807
808/**
809 * binder_worklist_empty() - Check if no items on the work list
810 * @proc:       binder_proc associated with list
811 * @list:	list to check
812 *
813 * Return: true if there are no items on list, else false
814 */
815static bool binder_worklist_empty(struct binder_proc *proc,
816				  struct list_head *list)
817{
818	bool ret;
819
820	binder_inner_proc_lock(proc);
821	ret = binder_worklist_empty_ilocked(list);
822	binder_inner_proc_unlock(proc);
823	return ret;
824}
825
826/**
827 * binder_enqueue_work_ilocked() - Add an item to the work list
828 * @work:         struct binder_work to add to list
829 * @target_list:  list to add work to
830 *
831 * Adds the work to the specified list. Asserts that work
832 * is not already on a list.
833 *
834 * Requires the proc->inner_lock to be held.
835 */
836static void
837binder_enqueue_work_ilocked(struct binder_work *work,
838			   struct list_head *target_list)
839{
840	BUG_ON(target_list == NULL);
841	BUG_ON(work->entry.next && !list_empty(&work->entry));
842	list_add_tail(&work->entry, target_list);
843}
844
845/**
846 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
847 * @thread:       thread to queue work to
848 * @work:         struct binder_work to add to list
849 *
850 * Adds the work to the todo list of the thread. Doesn't set the process_todo
851 * flag, which means that (if it wasn't already set) the thread will go to
852 * sleep without handling this work when it calls read.
853 *
854 * Requires the proc->inner_lock to be held.
855 */
856static void
857binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
858					    struct binder_work *work)
859{
860	WARN_ON(!list_empty(&thread->waiting_thread_node));
861	binder_enqueue_work_ilocked(work, &thread->todo);
862}
863
864/**
865 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
866 * @thread:       thread to queue work to
867 * @work:         struct binder_work to add to list
868 *
869 * Adds the work to the todo list of the thread, and enables processing
870 * of the todo queue.
871 *
872 * Requires the proc->inner_lock to be held.
873 */
874static void
875binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
876				   struct binder_work *work)
877{
878	WARN_ON(!list_empty(&thread->waiting_thread_node));
879	binder_enqueue_work_ilocked(work, &thread->todo);
880
881	/* (e)poll-based threads require an explicit wakeup signal when
882	 * queuing their own work; they rely on these events to consume
883	 * messages without I/O block. Without it, threads risk waiting
884	 * indefinitely without handling the work.
885	 */
886	if (thread->looper & BINDER_LOOPER_STATE_POLL &&
887	    thread->pid == current->pid && !thread->process_todo)
888		wake_up_interruptible_sync(&thread->wait);
889
890	thread->process_todo = true;
891}
892
893/**
894 * binder_enqueue_thread_work() - Add an item to the thread work list
895 * @thread:       thread to queue work to
896 * @work:         struct binder_work to add to list
897 *
898 * Adds the work to the todo list of the thread, and enables processing
899 * of the todo queue.
900 */
901static void
902binder_enqueue_thread_work(struct binder_thread *thread,
903			   struct binder_work *work)
904{
905	binder_inner_proc_lock(thread->proc);
906	binder_enqueue_thread_work_ilocked(thread, work);
907	binder_inner_proc_unlock(thread->proc);
908}
909
910static void
911binder_dequeue_work_ilocked(struct binder_work *work)
912{
913	list_del_init(&work->entry);
914}
915
916/**
917 * binder_dequeue_work() - Removes an item from the work list
918 * @proc:         binder_proc associated with list
919 * @work:         struct binder_work to remove from list
920 *
921 * Removes the specified work item from whatever list it is on.
922 * Can safely be called if work is not on any list.
923 */
924static void
925binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
926{
927	binder_inner_proc_lock(proc);
928	binder_dequeue_work_ilocked(work);
929	binder_inner_proc_unlock(proc);
930}
931
932static struct binder_work *binder_dequeue_work_head_ilocked(
933					struct list_head *list)
934{
935	struct binder_work *w;
936
937	w = list_first_entry_or_null(list, struct binder_work, entry);
938	if (w)
939		list_del_init(&w->entry);
940	return w;
941}
942
943static void
944binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
945static void binder_free_thread(struct binder_thread *thread);
946static void binder_free_proc(struct binder_proc *proc);
947static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
948
949#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
950static inline u64 binder_clock(void)
951{
952#ifdef CONFIG_TRACE_CLOCK
953	return trace_clock_local();
954#endif
955	return 0;
956}
957#endif
958
959static bool binder_has_work_ilocked(struct binder_thread *thread,
960				    bool do_proc_work)
961{
962	return thread->process_todo ||
963		thread->looper_need_return ||
964		(do_proc_work &&
965		 !binder_worklist_empty_ilocked(&thread->proc->todo));
966}
967
968static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
969{
970	bool has_work;
971
972	binder_inner_proc_lock(thread->proc);
973	has_work = binder_has_work_ilocked(thread, do_proc_work);
974	binder_inner_proc_unlock(thread->proc);
975
976	return has_work;
977}
978
979static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
980{
981	return !thread->transaction_stack &&
982		binder_worklist_empty_ilocked(&thread->todo) &&
983		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
984				   BINDER_LOOPER_STATE_REGISTERED));
985}
986
987static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
988					       bool sync)
989{
990	struct rb_node *n;
991	struct binder_thread *thread;
992
993	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
994		thread = rb_entry(n, struct binder_thread, rb_node);
995		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
996		    binder_available_for_proc_work_ilocked(thread)) {
997			if (sync)
998				wake_up_interruptible_sync(&thread->wait);
999			else
1000				wake_up_interruptible(&thread->wait);
1001		}
1002	}
1003}
1004
1005/**
1006 * binder_select_thread_ilocked() - selects a thread for doing proc work.
1007 * @proc:	process to select a thread from
1008 *
1009 * Note that calling this function moves the thread off the waiting_threads
1010 * list, so it can only be woken up by the caller of this function, or a
1011 * signal. Therefore, callers *should* always wake up the thread this function
1012 * returns.
1013 *
1014 * Return:	If there's a thread currently waiting for process work,
1015 *		returns that thread. Otherwise returns NULL.
1016 */
1017static struct binder_thread *
1018binder_select_thread_ilocked(struct binder_proc *proc)
1019{
1020	struct binder_thread *thread;
1021
1022	assert_spin_locked(&proc->inner_lock);
1023	thread = list_first_entry_or_null(&proc->waiting_threads,
1024					  struct binder_thread,
1025					  waiting_thread_node);
1026
1027	if (thread)
1028		list_del_init(&thread->waiting_thread_node);
1029
1030	return thread;
1031}
1032
1033/**
1034 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1035 * @proc:	process to wake up a thread in
1036 * @thread:	specific thread to wake-up (may be NULL)
1037 * @sync:	whether to do a synchronous wake-up
1038 *
1039 * This function wakes up a thread in the @proc process.
1040 * The caller may provide a specific thread to wake-up in
1041 * the @thread parameter. If @thread is NULL, this function
1042 * will wake up threads that have called poll().
1043 *
1044 * Note that for this function to work as expected, callers
1045 * should first call binder_select_thread() to find a thread
1046 * to handle the work (if they don't have a thread already),
1047 * and pass the result into the @thread parameter.
1048 */
1049static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1050					 struct binder_thread *thread,
1051					 bool sync)
1052{
1053	assert_spin_locked(&proc->inner_lock);
1054
1055	if (thread) {
1056		if (sync)
1057			wake_up_interruptible_sync(&thread->wait);
1058		else
1059			wake_up_interruptible(&thread->wait);
1060		return;
1061	}
1062
1063	/* Didn't find a thread waiting for proc work; this can happen
1064	 * in two scenarios:
1065	 * 1. All threads are busy handling transactions
1066	 *    In that case, one of those threads should call back into
1067	 *    the kernel driver soon and pick up this work.
1068	 * 2. Threads are using the (e)poll interface, in which case
1069	 *    they may be blocked on the waitqueue without having been
1070	 *    added to waiting_threads. For this case, we just iterate
1071	 *    over all threads not handling transaction work, and
1072	 *    wake them all up. We wake all because we don't know whether
1073	 *    a thread that called into (e)poll is handling non-binder
1074	 *    work currently.
1075	 */
1076	binder_wakeup_poll_threads_ilocked(proc, sync);
1077}
1078
1079static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1080{
1081	struct binder_thread *thread = binder_select_thread_ilocked(proc);
1082
1083	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1084}
1085
1086static void binder_set_nice(long nice)
1087{
1088	long min_nice;
1089
1090	if (can_nice(current, nice)) {
1091		set_user_nice(current, nice);
1092		return;
1093	}
1094	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1095	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1096		     "%d: nice value %ld not allowed use %ld instead\n",
1097		      current->pid, nice, min_nice);
1098	set_user_nice(current, min_nice);
1099	if (min_nice <= MAX_NICE)
1100		return;
1101	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1102}
1103
1104static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1105						   binder_uintptr_t ptr)
1106{
1107	struct rb_node *n = proc->nodes.rb_node;
1108	struct binder_node *node;
1109
1110	assert_spin_locked(&proc->inner_lock);
1111
1112	while (n) {
1113		node = rb_entry(n, struct binder_node, rb_node);
1114
1115		if (ptr < node->ptr)
1116			n = n->rb_left;
1117		else if (ptr > node->ptr)
1118			n = n->rb_right;
1119		else {
1120			/*
1121			 * take an implicit weak reference
1122			 * to ensure node stays alive until
1123			 * call to binder_put_node()
1124			 */
1125			binder_inc_node_tmpref_ilocked(node);
1126			return node;
1127		}
1128	}
1129	return NULL;
1130}
1131
1132static struct binder_node *binder_get_node(struct binder_proc *proc,
1133					   binder_uintptr_t ptr)
1134{
1135	struct binder_node *node;
1136
1137	binder_inner_proc_lock(proc);
1138	node = binder_get_node_ilocked(proc, ptr);
1139	binder_inner_proc_unlock(proc);
1140	return node;
1141}
1142
1143static struct binder_node *binder_init_node_ilocked(
1144						struct binder_proc *proc,
1145						struct binder_node *new_node,
1146						struct flat_binder_object *fp)
1147{
1148	struct rb_node **p = &proc->nodes.rb_node;
1149	struct rb_node *parent = NULL;
1150	struct binder_node *node;
1151	binder_uintptr_t ptr = fp ? fp->binder : 0;
1152	binder_uintptr_t cookie = fp ? fp->cookie : 0;
1153	__u32 flags = fp ? fp->flags : 0;
1154
1155	assert_spin_locked(&proc->inner_lock);
1156
1157	while (*p) {
1158
1159		parent = *p;
1160		node = rb_entry(parent, struct binder_node, rb_node);
1161
1162		if (ptr < node->ptr)
1163			p = &(*p)->rb_left;
1164		else if (ptr > node->ptr)
1165			p = &(*p)->rb_right;
1166		else {
1167			/*
1168			 * A matching node is already in
1169			 * the rb tree. Abandon the init
1170			 * and return it.
1171			 */
1172			binder_inc_node_tmpref_ilocked(node);
1173			return node;
1174		}
1175	}
1176	node = new_node;
1177	binder_stats_created(BINDER_STAT_NODE);
1178	node->tmp_refs++;
1179	rb_link_node(&node->rb_node, parent, p);
1180	rb_insert_color(&node->rb_node, &proc->nodes);
1181	node->debug_id = atomic_inc_return(&binder_last_id);
1182	node->proc = proc;
1183	node->ptr = ptr;
1184	node->cookie = cookie;
1185	node->work.type = BINDER_WORK_NODE;
1186	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1187	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1188	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1189	spin_lock_init(&node->lock);
1190	INIT_LIST_HEAD(&node->work.entry);
1191	INIT_LIST_HEAD(&node->async_todo);
1192	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1193		     "%d:%d node %d u%016llx c%016llx created\n",
1194		     proc->pid, current->pid, node->debug_id,
1195		     (u64)node->ptr, (u64)node->cookie);
1196
1197	return node;
1198}
1199
1200static struct binder_node *binder_new_node(struct binder_proc *proc,
1201					   struct flat_binder_object *fp)
1202{
1203	struct binder_node *node;
1204	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1205
1206	if (!new_node)
1207		return NULL;
1208	binder_inner_proc_lock(proc);
1209	node = binder_init_node_ilocked(proc, new_node, fp);
1210	binder_inner_proc_unlock(proc);
1211	if (node != new_node)
1212		/*
1213		 * The node was already added by another thread
1214		 */
1215		kfree(new_node);
1216
1217	return node;
1218}
1219
1220static void binder_free_node(struct binder_node *node)
1221{
1222	kfree(node);
1223	binder_stats_deleted(BINDER_STAT_NODE);
1224}
1225
1226static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1227				    int internal,
1228				    struct list_head *target_list)
1229{
1230	struct binder_proc *proc = node->proc;
1231
1232	assert_spin_locked(&node->lock);
1233	if (proc)
1234		assert_spin_locked(&proc->inner_lock);
1235	if (strong) {
1236		if (internal) {
1237			if (target_list == NULL &&
1238			    node->internal_strong_refs == 0 &&
1239			    !(node->proc &&
1240			      node == node->proc->context->binder_context_mgr_node &&
1241			      node->has_strong_ref)) {
1242				pr_err("invalid inc strong node for %d\n",
1243					node->debug_id);
1244				return -EINVAL;
1245			}
1246			node->internal_strong_refs++;
1247		} else
1248			node->local_strong_refs++;
1249		if (!node->has_strong_ref && target_list) {
1250			struct binder_thread *thread = container_of(target_list,
1251						    struct binder_thread, todo);
1252			binder_dequeue_work_ilocked(&node->work);
1253			BUG_ON(&thread->todo != target_list);
1254			binder_enqueue_deferred_thread_work_ilocked(thread,
1255								   &node->work);
1256		}
1257	} else {
1258		if (!internal)
1259			node->local_weak_refs++;
1260		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1261			if (target_list == NULL) {
1262				pr_err("invalid inc weak node for %d\n",
1263					node->debug_id);
1264				return -EINVAL;
1265			}
1266			/*
1267			 * See comment above
1268			 */
1269			binder_enqueue_work_ilocked(&node->work, target_list);
1270		}
1271	}
1272	return 0;
1273}
1274
1275static int binder_inc_node(struct binder_node *node, int strong, int internal,
1276			   struct list_head *target_list)
1277{
1278	int ret;
1279
1280	binder_node_inner_lock(node);
1281	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1282	binder_node_inner_unlock(node);
1283
1284	return ret;
1285}
1286
1287static bool binder_dec_node_nilocked(struct binder_node *node,
1288				     int strong, int internal)
1289{
1290	struct binder_proc *proc = node->proc;
1291
1292	assert_spin_locked(&node->lock);
1293	if (proc)
1294		assert_spin_locked(&proc->inner_lock);
1295	if (strong) {
1296		if (internal)
1297			node->internal_strong_refs--;
1298		else
1299			node->local_strong_refs--;
1300		if (node->local_strong_refs || node->internal_strong_refs)
1301			return false;
1302	} else {
1303		if (!internal)
1304			node->local_weak_refs--;
1305		if (node->local_weak_refs || node->tmp_refs ||
1306				!hlist_empty(&node->refs))
1307			return false;
1308	}
1309
1310	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1311		if (list_empty(&node->work.entry)) {
1312			binder_enqueue_work_ilocked(&node->work, &proc->todo);
1313			binder_wakeup_proc_ilocked(proc);
1314		}
1315	} else {
1316		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1317		    !node->local_weak_refs && !node->tmp_refs) {
1318			if (proc) {
1319				binder_dequeue_work_ilocked(&node->work);
1320				rb_erase(&node->rb_node, &proc->nodes);
1321				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1322					     "refless node %d deleted\n",
1323					     node->debug_id);
1324			} else {
1325				BUG_ON(!list_empty(&node->work.entry));
1326				spin_lock(&binder_dead_nodes_lock);
1327				/*
1328				 * tmp_refs could have changed so
1329				 * check it again
1330				 */
1331				if (node->tmp_refs) {
1332					spin_unlock(&binder_dead_nodes_lock);
1333					return false;
1334				}
1335				hlist_del(&node->dead_node);
1336				spin_unlock(&binder_dead_nodes_lock);
1337				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1338					     "dead node %d deleted\n",
1339					     node->debug_id);
1340			}
1341			return true;
1342		}
1343	}
1344	return false;
1345}
1346
1347static void binder_dec_node(struct binder_node *node, int strong, int internal)
1348{
1349	bool free_node;
1350
1351	binder_node_inner_lock(node);
1352	free_node = binder_dec_node_nilocked(node, strong, internal);
1353	binder_node_inner_unlock(node);
1354	if (free_node)
1355		binder_free_node(node);
1356}
1357
1358static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1359{
1360	/*
1361	 * No call to binder_inc_node() is needed since we
1362	 * don't need to inform userspace of any changes to
1363	 * tmp_refs
1364	 */
1365	node->tmp_refs++;
1366}
1367
1368/**
1369 * binder_inc_node_tmpref() - take a temporary reference on node
1370 * @node:	node to reference
1371 *
1372 * Take reference on node to prevent the node from being freed
1373 * while referenced only by a local variable. The inner lock is
1374 * needed to serialize with the node work on the queue (which
1375 * isn't needed after the node is dead). If the node is dead
1376 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1377 * node->tmp_refs against dead-node-only cases where the node
1378 * lock cannot be acquired (eg traversing the dead node list to
1379 * print nodes)
1380 */
1381static void binder_inc_node_tmpref(struct binder_node *node)
1382{
1383	binder_node_lock(node);
1384	if (node->proc)
1385		binder_inner_proc_lock(node->proc);
1386	else
1387		spin_lock(&binder_dead_nodes_lock);
1388	binder_inc_node_tmpref_ilocked(node);
1389	if (node->proc)
1390		binder_inner_proc_unlock(node->proc);
1391	else
1392		spin_unlock(&binder_dead_nodes_lock);
1393	binder_node_unlock(node);
1394}
1395
1396/**
1397 * binder_dec_node_tmpref() - remove a temporary reference on node
1398 * @node:	node to reference
1399 *
1400 * Release temporary reference on node taken via binder_inc_node_tmpref()
1401 */
1402static void binder_dec_node_tmpref(struct binder_node *node)
1403{
1404	bool free_node;
1405
1406	binder_node_inner_lock(node);
1407	if (!node->proc)
1408		spin_lock(&binder_dead_nodes_lock);
1409	else
1410		__acquire(&binder_dead_nodes_lock);
1411	node->tmp_refs--;
1412	BUG_ON(node->tmp_refs < 0);
1413	if (!node->proc)
1414		spin_unlock(&binder_dead_nodes_lock);
1415	else
1416		__release(&binder_dead_nodes_lock);
1417	/*
1418	 * Call binder_dec_node() to check if all refcounts are 0
1419	 * and cleanup is needed. Calling with strong=0 and internal=1
1420	 * causes no actual reference to be released in binder_dec_node().
1421	 * If that changes, a change is needed here too.
1422	 */
1423	free_node = binder_dec_node_nilocked(node, 0, 1);
1424	binder_node_inner_unlock(node);
1425	if (free_node)
1426		binder_free_node(node);
1427}
1428
1429static void binder_put_node(struct binder_node *node)
1430{
1431	binder_dec_node_tmpref(node);
1432}
1433
1434static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1435						 u32 desc, bool need_strong_ref)
1436{
1437	struct rb_node *n = proc->refs_by_desc.rb_node;
1438	struct binder_ref *ref;
1439
1440	while (n) {
1441		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1442
1443		if (desc < ref->data.desc) {
1444			n = n->rb_left;
1445		} else if (desc > ref->data.desc) {
1446			n = n->rb_right;
1447		} else if (need_strong_ref && !ref->data.strong) {
1448			binder_user_error("tried to use weak ref as strong ref\n");
1449			return NULL;
1450		} else {
1451			return ref;
1452		}
1453	}
1454	return NULL;
1455}
1456
1457/**
1458 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1459 * @proc:	binder_proc that owns the ref
1460 * @node:	binder_node of target
1461 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
1462 *
1463 * Look up the ref for the given node and return it if it exists
1464 *
1465 * If it doesn't exist and the caller provides a newly allocated
1466 * ref, initialize the fields of the newly allocated ref and insert
1467 * into the given proc rb_trees and node refs list.
1468 *
1469 * Return:	the ref for node. It is possible that another thread
1470 *		allocated/initialized the ref first in which case the
1471 *		returned ref would be different than the passed-in
1472 *		new_ref. new_ref must be kfree'd by the caller in
1473 *		this case.
1474 */
1475static struct binder_ref *binder_get_ref_for_node_olocked(
1476					struct binder_proc *proc,
1477					struct binder_node *node,
1478					struct binder_ref *new_ref)
1479{
1480	struct binder_context *context = proc->context;
1481	struct rb_node **p = &proc->refs_by_node.rb_node;
1482	struct rb_node *parent = NULL;
1483	struct binder_ref *ref;
1484	struct rb_node *n;
1485
1486	while (*p) {
1487		parent = *p;
1488		ref = rb_entry(parent, struct binder_ref, rb_node_node);
1489
1490		if (node < ref->node)
1491			p = &(*p)->rb_left;
1492		else if (node > ref->node)
1493			p = &(*p)->rb_right;
1494		else
1495			return ref;
1496	}
1497	if (!new_ref)
1498		return NULL;
1499
1500	binder_stats_created(BINDER_STAT_REF);
1501	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1502	new_ref->proc = proc;
1503	new_ref->node = node;
1504	rb_link_node(&new_ref->rb_node_node, parent, p);
1505	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1506
1507	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1508	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1509		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1510		if (ref->data.desc > new_ref->data.desc)
1511			break;
1512		new_ref->data.desc = ref->data.desc + 1;
1513	}
1514
1515	p = &proc->refs_by_desc.rb_node;
1516	while (*p) {
1517		parent = *p;
1518		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1519
1520		if (new_ref->data.desc < ref->data.desc)
1521			p = &(*p)->rb_left;
1522		else if (new_ref->data.desc > ref->data.desc)
1523			p = &(*p)->rb_right;
1524		else
1525			BUG();
1526	}
1527	rb_link_node(&new_ref->rb_node_desc, parent, p);
1528	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1529
1530	binder_node_lock(node);
1531	hlist_add_head(&new_ref->node_entry, &node->refs);
1532
1533	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1534		     "%d new ref %d desc %d for node %d\n",
1535		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1536		      node->debug_id);
1537	binder_node_unlock(node);
1538	return new_ref;
1539}
1540
1541static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1542{
1543	bool delete_node = false;
1544
1545	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1546		     "%d delete ref %d desc %d for node %d\n",
1547		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
1548		      ref->node->debug_id);
1549
1550	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1551	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1552
1553	binder_node_inner_lock(ref->node);
1554	if (ref->data.strong)
1555		binder_dec_node_nilocked(ref->node, 1, 1);
1556
1557	hlist_del(&ref->node_entry);
1558	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1559	binder_node_inner_unlock(ref->node);
1560	/*
1561	 * Clear ref->node unless we want the caller to free the node
1562	 */
1563	if (!delete_node) {
1564		/*
1565		 * The caller uses ref->node to determine
1566		 * whether the node needs to be freed. Clear
1567		 * it since the node is still alive.
1568		 */
1569		ref->node = NULL;
1570	}
1571
1572	if (ref->death) {
1573		binder_debug(BINDER_DEBUG_DEAD_BINDER,
1574			     "%d delete ref %d desc %d has death notification\n",
1575			      ref->proc->pid, ref->data.debug_id,
1576			      ref->data.desc);
1577		binder_dequeue_work(ref->proc, &ref->death->work);
1578		binder_stats_deleted(BINDER_STAT_DEATH);
1579	}
1580	binder_stats_deleted(BINDER_STAT_REF);
1581}
1582
1583/**
1584 * binder_inc_ref_olocked() - increment the ref for given handle
1585 * @ref:         ref to be incremented
1586 * @strong:      if true, strong increment, else weak
1587 * @target_list: list to queue node work on
1588 *
1589 * Increment the ref. @ref->proc->outer_lock must be held on entry
1590 *
1591 * Return: 0, if successful, else errno
1592 */
1593static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1594				  struct list_head *target_list)
1595{
1596	int ret;
1597
1598	if (strong) {
1599		if (ref->data.strong == 0) {
1600			ret = binder_inc_node(ref->node, 1, 1, target_list);
1601			if (ret)
1602				return ret;
1603		}
1604		ref->data.strong++;
1605	} else {
1606		if (ref->data.weak == 0) {
1607			ret = binder_inc_node(ref->node, 0, 1, target_list);
1608			if (ret)
1609				return ret;
1610		}
1611		ref->data.weak++;
1612	}
1613	return 0;
1614}
1615
1616/**
1617 * binder_dec_ref() - dec the ref for given handle
1618 * @ref:	ref to be decremented
1619 * @strong:	if true, strong decrement, else weak
1620 *
1621 * Decrement the ref.
1622 *
1623 * Return: true if ref is cleaned up and ready to be freed
1624 */
1625static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1626{
1627	if (strong) {
1628		if (ref->data.strong == 0) {
1629			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1630					  ref->proc->pid, ref->data.debug_id,
1631					  ref->data.desc, ref->data.strong,
1632					  ref->data.weak);
1633			return false;
1634		}
1635		ref->data.strong--;
1636		if (ref->data.strong == 0)
1637			binder_dec_node(ref->node, strong, 1);
1638	} else {
1639		if (ref->data.weak == 0) {
1640			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1641					  ref->proc->pid, ref->data.debug_id,
1642					  ref->data.desc, ref->data.strong,
1643					  ref->data.weak);
1644			return false;
1645		}
1646		ref->data.weak--;
1647	}
1648	if (ref->data.strong == 0 && ref->data.weak == 0) {
1649		binder_cleanup_ref_olocked(ref);
1650		return true;
1651	}
1652	return false;
1653}
1654
1655/**
1656 * binder_get_node_from_ref() - get the node from the given proc/desc
1657 * @proc:	proc containing the ref
1658 * @desc:	the handle associated with the ref
1659 * @need_strong_ref: if true, only return node if ref is strong
1660 * @rdata:	the id/refcount data for the ref
1661 *
1662 * Given a proc and ref handle, return the associated binder_node
1663 *
1664 * Return: a binder_node or NULL if not found or not strong when strong required
1665 */
1666static struct binder_node *binder_get_node_from_ref(
1667		struct binder_proc *proc,
1668		u32 desc, bool need_strong_ref,
1669		struct binder_ref_data *rdata)
1670{
1671	struct binder_node *node;
1672	struct binder_ref *ref;
1673
1674	binder_proc_lock(proc);
1675	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1676	if (!ref)
1677		goto err_no_ref;
1678	node = ref->node;
1679	/*
1680	 * Take an implicit reference on the node to ensure
1681	 * it stays alive until the call to binder_put_node()
1682	 */
1683	binder_inc_node_tmpref(node);
1684	if (rdata)
1685		*rdata = ref->data;
1686	binder_proc_unlock(proc);
1687
1688	return node;
1689
1690err_no_ref:
1691	binder_proc_unlock(proc);
1692	return NULL;
1693}
1694
1695/**
1696 * binder_free_ref() - free the binder_ref
1697 * @ref:	ref to free
1698 *
1699 * Free the binder_ref. Free the binder_node indicated by ref->node
1700 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1701 */
1702static void binder_free_ref(struct binder_ref *ref)
1703{
1704	if (ref->node)
1705		binder_free_node(ref->node);
1706	kfree(ref->death);
1707	kfree(ref);
1708}
1709
1710/**
1711 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1712 * @proc:	proc containing the ref
1713 * @desc:	the handle associated with the ref
1714 * @increment:	true=inc reference, false=dec reference
1715 * @strong:	true=strong reference, false=weak reference
1716 * @rdata:	the id/refcount data for the ref
1717 *
1718 * Given a proc and ref handle, increment or decrement the ref
1719 * according to "increment" arg.
1720 *
1721 * Return: 0 if successful, else errno
1722 */
1723static int binder_update_ref_for_handle(struct binder_proc *proc,
1724		uint32_t desc, bool increment, bool strong,
1725		struct binder_ref_data *rdata)
1726{
1727	int ret = 0;
1728	struct binder_ref *ref;
1729	bool delete_ref = false;
1730
1731	binder_proc_lock(proc);
1732	ref = binder_get_ref_olocked(proc, desc, strong);
1733	if (!ref) {
1734		ret = -EINVAL;
1735		goto err_no_ref;
1736	}
1737	if (increment)
1738		ret = binder_inc_ref_olocked(ref, strong, NULL);
1739	else
1740		delete_ref = binder_dec_ref_olocked(ref, strong);
1741
1742	if (rdata)
1743		*rdata = ref->data;
1744	binder_proc_unlock(proc);
1745
1746	if (delete_ref)
1747		binder_free_ref(ref);
1748	return ret;
1749
1750err_no_ref:
1751	binder_proc_unlock(proc);
1752	return ret;
1753}
1754
1755/**
1756 * binder_dec_ref_for_handle() - dec the ref for given handle
1757 * @proc:	proc containing the ref
1758 * @desc:	the handle associated with the ref
1759 * @strong:	true=strong reference, false=weak reference
1760 * @rdata:	the id/refcount data for the ref
1761 *
1762 * Just calls binder_update_ref_for_handle() to decrement the ref.
1763 *
1764 * Return: 0 if successful, else errno
1765 */
1766static int binder_dec_ref_for_handle(struct binder_proc *proc,
1767		uint32_t desc, bool strong, struct binder_ref_data *rdata)
1768{
1769	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1770}
1771
1772
1773/**
1774 * binder_inc_ref_for_node() - increment the ref for given proc/node
1775 * @proc:	 proc containing the ref
1776 * @node:	 target node
1777 * @strong:	 true=strong reference, false=weak reference
1778 * @target_list: worklist to use if node is incremented
1779 * @rdata:	 the id/refcount data for the ref
1780 *
1781 * Given a proc and node, increment the ref. Create the ref if it
1782 * doesn't already exist
1783 *
1784 * Return: 0 if successful, else errno
1785 */
1786static int binder_inc_ref_for_node(struct binder_proc *proc,
1787			struct binder_node *node,
1788			bool strong,
1789			struct list_head *target_list,
1790			struct binder_ref_data *rdata)
1791{
1792	struct binder_ref *ref;
1793	struct binder_ref *new_ref = NULL;
1794	int ret = 0;
1795
1796	binder_proc_lock(proc);
1797	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1798	if (!ref) {
1799		binder_proc_unlock(proc);
1800		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1801		if (!new_ref)
1802			return -ENOMEM;
1803		binder_proc_lock(proc);
1804		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1805	}
1806	ret = binder_inc_ref_olocked(ref, strong, target_list);
1807	*rdata = ref->data;
1808	if (ret && ref == new_ref) {
1809		/*
1810		 * Cleanup the failed reference here as the target
1811		 * could now be dead and have already released its
1812		 * references by now. Calling on the new reference
1813		 * with strong=0 and a tmp_refs will not decrement
1814		 * the node. The new_ref gets kfree'd below.
1815		 */
1816		binder_cleanup_ref_olocked(new_ref);
1817		ref = NULL;
1818	}
1819
1820	binder_proc_unlock(proc);
1821	if (new_ref && ref != new_ref)
1822		/*
1823		 * Another thread created the ref first so
1824		 * free the one we allocated
1825		 */
1826		kfree(new_ref);
1827	return ret;
1828}
1829
1830static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1831					   struct binder_transaction *t)
1832{
1833	BUG_ON(!target_thread);
1834	assert_spin_locked(&target_thread->proc->inner_lock);
1835	BUG_ON(target_thread->transaction_stack != t);
1836	BUG_ON(target_thread->transaction_stack->from != target_thread);
1837	target_thread->transaction_stack =
1838		target_thread->transaction_stack->from_parent;
1839	t->from = NULL;
1840}
1841
1842/**
1843 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1844 * @thread:	thread to decrement
1845 *
1846 * A thread needs to be kept alive while being used to create or
1847 * handle a transaction. binder_get_txn_from() is used to safely
1848 * extract t->from from a binder_transaction and keep the thread
1849 * indicated by t->from from being freed. When done with that
1850 * binder_thread, this function is called to decrement the
1851 * tmp_ref and free if appropriate (thread has been released
1852 * and no transaction being processed by the driver)
1853 */
1854static void binder_thread_dec_tmpref(struct binder_thread *thread)
1855{
1856	/*
1857	 * atomic is used to protect the counter value while
1858	 * it cannot reach zero or thread->is_dead is false
1859	 */
1860	binder_inner_proc_lock(thread->proc);
1861	atomic_dec(&thread->tmp_ref);
1862	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1863		binder_inner_proc_unlock(thread->proc);
1864		binder_free_thread(thread);
1865		return;
1866	}
1867	binder_inner_proc_unlock(thread->proc);
1868}
1869
1870/**
1871 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1872 * @proc:	proc to decrement
1873 *
1874 * A binder_proc needs to be kept alive while being used to create or
1875 * handle a transaction. proc->tmp_ref is incremented when
1876 * creating a new transaction or the binder_proc is currently in-use
1877 * by threads that are being released. When done with the binder_proc,
1878 * this function is called to decrement the counter and free the
1879 * proc if appropriate (proc has been released, all threads have
1880 * been released and not currenly in-use to process a transaction).
1881 */
1882static void binder_proc_dec_tmpref(struct binder_proc *proc)
1883{
1884	binder_inner_proc_lock(proc);
1885	proc->tmp_ref--;
1886	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1887			!proc->tmp_ref) {
1888		binder_inner_proc_unlock(proc);
1889		binder_free_proc(proc);
1890		return;
1891	}
1892	binder_inner_proc_unlock(proc);
1893}
1894
1895/**
1896 * binder_get_txn_from() - safely extract the "from" thread in transaction
1897 * @t:	binder transaction for t->from
1898 *
1899 * Atomically return the "from" thread and increment the tmp_ref
1900 * count for the thread to ensure it stays alive until
1901 * binder_thread_dec_tmpref() is called.
1902 *
1903 * Return: the value of t->from
1904 */
1905static struct binder_thread *binder_get_txn_from(
1906		struct binder_transaction *t)
1907{
1908	struct binder_thread *from;
1909
1910	spin_lock(&t->lock);
1911	from = t->from;
1912	if (from)
1913		atomic_inc(&from->tmp_ref);
1914	spin_unlock(&t->lock);
1915	return from;
1916}
1917
1918/**
1919 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1920 * @t:	binder transaction for t->from
1921 *
1922 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1923 * to guarantee that the thread cannot be released while operating on it.
1924 * The caller must call binder_inner_proc_unlock() to release the inner lock
1925 * as well as call binder_dec_thread_txn() to release the reference.
1926 *
1927 * Return: the value of t->from
1928 */
1929static struct binder_thread *binder_get_txn_from_and_acq_inner(
1930		struct binder_transaction *t)
1931	__acquires(&t->from->proc->inner_lock)
1932{
1933	struct binder_thread *from;
1934
1935	from = binder_get_txn_from(t);
1936	if (!from) {
1937		__acquire(&from->proc->inner_lock);
1938		return NULL;
1939	}
1940	binder_inner_proc_lock(from->proc);
1941	if (t->from) {
1942		BUG_ON(from != t->from);
1943		return from;
1944	}
1945	binder_inner_proc_unlock(from->proc);
1946	__acquire(&from->proc->inner_lock);
1947	binder_thread_dec_tmpref(from);
1948	return NULL;
1949}
1950
1951/**
1952 * binder_free_txn_fixups() - free unprocessed fd fixups
1953 * @t:	binder transaction for t->from
1954 *
1955 * If the transaction is being torn down prior to being
1956 * processed by the target process, free all of the
1957 * fd fixups and fput the file structs. It is safe to
1958 * call this function after the fixups have been
1959 * processed -- in that case, the list will be empty.
1960 */
1961static void binder_free_txn_fixups(struct binder_transaction *t)
1962{
1963	struct binder_txn_fd_fixup *fixup, *tmp;
1964
1965	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1966		fput(fixup->file);
1967		list_del(&fixup->fixup_entry);
1968		kfree(fixup);
1969	}
1970}
1971
1972static void binder_free_transaction(struct binder_transaction *t)
1973{
1974	struct binder_proc *target_proc = t->to_proc;
1975
1976	if (target_proc) {
1977		binder_inner_proc_lock(target_proc);
1978		if (t->buffer)
1979			t->buffer->transaction = NULL;
1980		binder_inner_proc_unlock(target_proc);
1981	}
1982	/*
1983	 * If the transaction has no target_proc, then
1984	 * t->buffer->transaction has already been cleared.
1985	 */
1986	binder_free_txn_fixups(t);
1987	kfree(t);
1988	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1989}
1990
1991static void binder_send_failed_reply(struct binder_transaction *t,
1992				     uint32_t error_code)
1993{
1994	struct binder_thread *target_thread;
1995	struct binder_transaction *next;
1996
1997	BUG_ON(t->flags & TF_ONE_WAY);
1998	while (1) {
1999		target_thread = binder_get_txn_from_and_acq_inner(t);
2000		if (target_thread) {
2001			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2002				     "send failed reply for transaction %d to %d:%d\n",
2003				      t->debug_id,
2004				      target_thread->proc->pid,
2005				      target_thread->pid);
2006
2007			binder_pop_transaction_ilocked(target_thread, t);
2008			if (target_thread->reply_error.cmd == BR_OK) {
2009				target_thread->reply_error.cmd = error_code;
2010				binder_enqueue_thread_work_ilocked(
2011					target_thread,
2012					&target_thread->reply_error.work);
2013				wake_up_interruptible(&target_thread->wait);
2014			} else {
2015				/*
2016				 * Cannot get here for normal operation, but
2017				 * we can if multiple synchronous transactions
2018				 * are sent without blocking for responses.
2019				 * Just ignore the 2nd error in this case.
2020				 */
2021				pr_warn("Unexpected reply error: %u\n",
2022					target_thread->reply_error.cmd);
2023			}
2024			binder_inner_proc_unlock(target_thread->proc);
2025			binder_thread_dec_tmpref(target_thread);
2026			binder_free_transaction(t);
2027			return;
2028		}
2029		__release(&target_thread->proc->inner_lock);
2030		next = t->from_parent;
2031
2032		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2033			     "send failed reply for transaction %d, target dead\n",
2034			     t->debug_id);
2035
2036		binder_free_transaction(t);
2037		if (next == NULL) {
2038			binder_debug(BINDER_DEBUG_DEAD_BINDER,
2039				     "reply failed, no target thread at root\n");
2040			return;
2041		}
2042		t = next;
2043		binder_debug(BINDER_DEBUG_DEAD_BINDER,
2044			     "reply failed, no target thread -- retry %d\n",
2045			      t->debug_id);
2046	}
2047}
2048
2049/**
2050 * binder_cleanup_transaction() - cleans up undelivered transaction
2051 * @t:		transaction that needs to be cleaned up
2052 * @reason:	reason the transaction wasn't delivered
2053 * @error_code:	error to return to caller (if synchronous call)
2054 */
2055static void binder_cleanup_transaction(struct binder_transaction *t,
2056				       const char *reason,
2057				       uint32_t error_code)
2058{
2059	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2060		binder_send_failed_reply(t, error_code);
2061	} else {
2062		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2063			"undelivered transaction %d, %s\n",
2064			t->debug_id, reason);
2065		binder_free_transaction(t);
2066	}
2067}
2068
2069/**
2070 * binder_get_object() - gets object and checks for valid metadata
2071 * @proc:	binder_proc owning the buffer
2072 * @u:		sender's user pointer to base of buffer
2073 * @buffer:	binder_buffer that we're parsing.
2074 * @offset:	offset in the @buffer at which to validate an object.
2075 * @object:	struct binder_object to read into
2076 *
2077 * Copy the binder object at the given offset into @object. If @u is
2078 * provided then the copy is from the sender's buffer. If not, then
2079 * it is copied from the target's @buffer.
2080 *
2081 * Return:	If there's a valid metadata object at @offset, the
2082 *		size of that object. Otherwise, it returns zero. The object
2083 *		is read into the struct binder_object pointed to by @object.
2084 */
2085static size_t binder_get_object(struct binder_proc *proc,
2086				const void __user *u,
2087				struct binder_buffer *buffer,
2088				unsigned long offset,
2089				struct binder_object *object)
2090{
2091	size_t read_size;
2092	struct binder_object_header *hdr;
2093	size_t object_size = 0;
2094
2095	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2096	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2097	    !IS_ALIGNED(offset, sizeof(u32)))
2098		return 0;
2099
2100	if (u) {
2101		if (copy_from_user(object, u + offset, read_size))
2102			return 0;
2103	} else {
2104		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2105						  offset, read_size))
2106			return 0;
2107	}
2108
2109	/* Ok, now see if we read a complete object. */
2110	hdr = &object->hdr;
2111	switch (hdr->type) {
2112	case BINDER_TYPE_BINDER:
2113	case BINDER_TYPE_WEAK_BINDER:
2114	case BINDER_TYPE_HANDLE:
2115	case BINDER_TYPE_WEAK_HANDLE:
2116		object_size = sizeof(struct flat_binder_object);
2117		break;
2118	case BINDER_TYPE_FD:
2119		object_size = sizeof(struct binder_fd_object);
2120		break;
2121	case BINDER_TYPE_PTR:
2122		object_size = sizeof(struct binder_buffer_object);
2123		break;
2124	case BINDER_TYPE_FDA:
2125		object_size = sizeof(struct binder_fd_array_object);
2126		break;
2127	default:
2128		return 0;
2129	}
2130	if (offset <= buffer->data_size - object_size &&
2131	    buffer->data_size >= object_size)
2132		return object_size;
2133	else
2134		return 0;
2135}
2136
2137/**
2138 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2139 * @proc:	binder_proc owning the buffer
2140 * @b:		binder_buffer containing the object
2141 * @object:	struct binder_object to read into
2142 * @index:	index in offset array at which the binder_buffer_object is
2143 *		located
2144 * @start_offset: points to the start of the offset array
2145 * @object_offsetp: offset of @object read from @b
2146 * @num_valid:	the number of valid offsets in the offset array
2147 *
2148 * Return:	If @index is within the valid range of the offset array
2149 *		described by @start and @num_valid, and if there's a valid
2150 *		binder_buffer_object at the offset found in index @index
2151 *		of the offset array, that object is returned. Otherwise,
2152 *		%NULL is returned.
2153 *		Note that the offset found in index @index itself is not
2154 *		verified; this function assumes that @num_valid elements
2155 *		from @start were previously verified to have valid offsets.
2156 *		If @object_offsetp is non-NULL, then the offset within
2157 *		@b is written to it.
2158 */
2159static struct binder_buffer_object *binder_validate_ptr(
2160						struct binder_proc *proc,
2161						struct binder_buffer *b,
2162						struct binder_object *object,
2163						binder_size_t index,
2164						binder_size_t start_offset,
2165						binder_size_t *object_offsetp,
2166						binder_size_t num_valid)
2167{
2168	size_t object_size;
2169	binder_size_t object_offset;
2170	unsigned long buffer_offset;
2171
2172	if (index >= num_valid)
2173		return NULL;
2174
2175	buffer_offset = start_offset + sizeof(binder_size_t) * index;
2176	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2177					  b, buffer_offset,
2178					  sizeof(object_offset)))
2179		return NULL;
2180	object_size = binder_get_object(proc, NULL, b, object_offset, object);
2181	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
2182		return NULL;
2183	if (object_offsetp)
2184		*object_offsetp = object_offset;
2185
2186	return &object->bbo;
2187}
2188
2189/**
2190 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2191 * @proc:		binder_proc owning the buffer
2192 * @b:			transaction buffer
2193 * @objects_start_offset: offset to start of objects buffer
2194 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
2195 * @fixup_offset:	start offset in @buffer to fix up
2196 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
2197 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
2198 *
2199 * Return:		%true if a fixup in buffer @buffer at offset @offset is
2200 *			allowed.
2201 *
2202 * For safety reasons, we only allow fixups inside a buffer to happen
2203 * at increasing offsets; additionally, we only allow fixup on the last
2204 * buffer object that was verified, or one of its parents.
2205 *
2206 * Example of what is allowed:
2207 *
2208 * A
2209 *   B (parent = A, offset = 0)
2210 *   C (parent = A, offset = 16)
2211 *     D (parent = C, offset = 0)
2212 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2213 *
2214 * Examples of what is not allowed:
2215 *
2216 * Decreasing offsets within the same parent:
2217 * A
2218 *   C (parent = A, offset = 16)
2219 *   B (parent = A, offset = 0) // decreasing offset within A
2220 *
2221 * Referring to a parent that wasn't the last object or any of its parents:
2222 * A
2223 *   B (parent = A, offset = 0)
2224 *   C (parent = A, offset = 0)
2225 *   C (parent = A, offset = 16)
2226 *     D (parent = B, offset = 0) // B is not A or any of A's parents
2227 */
2228static bool binder_validate_fixup(struct binder_proc *proc,
2229				  struct binder_buffer *b,
2230				  binder_size_t objects_start_offset,
2231				  binder_size_t buffer_obj_offset,
2232				  binder_size_t fixup_offset,
2233				  binder_size_t last_obj_offset,
2234				  binder_size_t last_min_offset)
2235{
2236	if (!last_obj_offset) {
2237		/* Nothing to fix up in */
2238		return false;
2239	}
2240
2241	while (last_obj_offset != buffer_obj_offset) {
2242		unsigned long buffer_offset;
2243		struct binder_object last_object;
2244		struct binder_buffer_object *last_bbo;
2245		size_t object_size = binder_get_object(proc, NULL, b,
2246						       last_obj_offset,
2247						       &last_object);
2248		if (object_size != sizeof(*last_bbo))
2249			return false;
2250
2251		last_bbo = &last_object.bbo;
2252		/*
2253		 * Safe to retrieve the parent of last_obj, since it
2254		 * was already previously verified by the driver.
2255		 */
2256		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2257			return false;
2258		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
2259		buffer_offset = objects_start_offset +
2260			sizeof(binder_size_t) * last_bbo->parent;
2261		if (binder_alloc_copy_from_buffer(&proc->alloc,
2262						  &last_obj_offset,
2263						  b, buffer_offset,
2264						  sizeof(last_obj_offset)))
2265			return false;
2266	}
2267	return (fixup_offset >= last_min_offset);
2268}
2269
2270/**
2271 * struct binder_task_work_cb - for deferred close
2272 *
2273 * @twork:                callback_head for task work
2274 * @fd:                   fd to close
2275 *
2276 * Structure to pass task work to be handled after
2277 * returning from binder_ioctl() via task_work_add().
2278 */
2279struct binder_task_work_cb {
2280	struct callback_head twork;
2281	struct file *file;
2282};
2283
2284/**
2285 * binder_do_fd_close() - close list of file descriptors
2286 * @twork:	callback head for task work
2287 *
2288 * It is not safe to call ksys_close() during the binder_ioctl()
2289 * function if there is a chance that binder's own file descriptor
2290 * might be closed. This is to meet the requirements for using
2291 * fdget() (see comments for __fget_light()). Therefore use
2292 * task_work_add() to schedule the close operation once we have
2293 * returned from binder_ioctl(). This function is a callback
2294 * for that mechanism and does the actual ksys_close() on the
2295 * given file descriptor.
2296 */
2297static void binder_do_fd_close(struct callback_head *twork)
2298{
2299	struct binder_task_work_cb *twcb = container_of(twork,
2300			struct binder_task_work_cb, twork);
2301
2302	fput(twcb->file);
2303	kfree(twcb);
2304}
2305
2306/**
2307 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
2308 * @fd:		file-descriptor to close
2309 *
2310 * See comments in binder_do_fd_close(). This function is used to schedule
2311 * a file-descriptor to be closed after returning from binder_ioctl().
2312 */
2313static void binder_deferred_fd_close(int fd)
2314{
2315	struct binder_task_work_cb *twcb;
2316
2317	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
2318	if (!twcb)
2319		return;
2320	init_task_work(&twcb->twork, binder_do_fd_close);
2321	close_fd_get_file(fd, &twcb->file);
2322	if (twcb->file) {
2323		filp_close(twcb->file, current->files);
2324		task_work_add(current, &twcb->twork, TWA_RESUME);
2325	} else {
2326		kfree(twcb);
2327	}
2328}
2329
2330static void binder_transaction_buffer_release(struct binder_proc *proc,
2331					      struct binder_thread *thread,
2332					      struct binder_buffer *buffer,
2333					      binder_size_t off_end_offset,
2334					      bool is_failure)
2335{
2336	int debug_id = buffer->debug_id;
2337	binder_size_t off_start_offset, buffer_offset;
2338
2339	binder_debug(BINDER_DEBUG_TRANSACTION,
2340		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
2341		     proc->pid, buffer->debug_id,
2342		     buffer->data_size, buffer->offsets_size,
2343		     (unsigned long long)off_end_offset);
2344
2345	if (buffer->target_node)
2346		binder_dec_node(buffer->target_node, 1, 0);
2347
2348	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
2349
2350	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2351	     buffer_offset += sizeof(binder_size_t)) {
2352		struct binder_object_header *hdr;
2353		size_t object_size = 0;
2354		struct binder_object object;
2355		binder_size_t object_offset;
2356
2357		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
2358						   buffer, buffer_offset,
2359						   sizeof(object_offset)))
2360			object_size = binder_get_object(proc, NULL, buffer,
2361							object_offset, &object);
2362		if (object_size == 0) {
2363			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2364			       debug_id, (u64)object_offset, buffer->data_size);
2365			continue;
2366		}
2367		hdr = &object.hdr;
2368		switch (hdr->type) {
2369		case BINDER_TYPE_BINDER:
2370		case BINDER_TYPE_WEAK_BINDER: {
2371			struct flat_binder_object *fp;
2372			struct binder_node *node;
2373
2374			fp = to_flat_binder_object(hdr);
2375			node = binder_get_node(proc, fp->binder);
2376			if (node == NULL) {
2377				pr_err("transaction release %d bad node %016llx\n",
2378				       debug_id, (u64)fp->binder);
2379				break;
2380			}
2381			binder_debug(BINDER_DEBUG_TRANSACTION,
2382				     "        node %d u%016llx\n",
2383				     node->debug_id, (u64)node->ptr);
2384			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2385					0);
2386			binder_put_node(node);
2387		} break;
2388		case BINDER_TYPE_HANDLE:
2389		case BINDER_TYPE_WEAK_HANDLE: {
2390			struct flat_binder_object *fp;
2391			struct binder_ref_data rdata;
2392			int ret;
2393
2394			fp = to_flat_binder_object(hdr);
2395			ret = binder_dec_ref_for_handle(proc, fp->handle,
2396				hdr->type == BINDER_TYPE_HANDLE, &rdata);
2397
2398			if (ret) {
2399				pr_err("transaction release %d bad handle %d, ret = %d\n",
2400				 debug_id, fp->handle, ret);
2401				break;
2402			}
2403			binder_debug(BINDER_DEBUG_TRANSACTION,
2404				     "        ref %d desc %d\n",
2405				     rdata.debug_id, rdata.desc);
2406		} break;
2407
2408		case BINDER_TYPE_FD: {
2409			/*
2410			 * No need to close the file here since user-space
2411			 * closes it for for successfully delivered
2412			 * transactions. For transactions that weren't
2413			 * delivered, the new fd was never allocated so
2414			 * there is no need to close and the fput on the
2415			 * file is done when the transaction is torn
2416			 * down.
2417			 */
2418		} break;
2419		case BINDER_TYPE_PTR:
2420			/*
2421			 * Nothing to do here, this will get cleaned up when the
2422			 * transaction buffer gets freed
2423			 */
2424			break;
2425		case BINDER_TYPE_FDA: {
2426			struct binder_fd_array_object *fda;
2427			struct binder_buffer_object *parent;
2428			struct binder_object ptr_object;
2429			binder_size_t fda_offset;
2430			size_t fd_index;
2431			binder_size_t fd_buf_size;
2432			binder_size_t num_valid;
2433
2434			if (is_failure) {
2435				/*
2436				 * The fd fixups have not been applied so no
2437				 * fds need to be closed.
2438				 */
2439				continue;
2440			}
2441
2442			num_valid = (buffer_offset - off_start_offset) /
2443						sizeof(binder_size_t);
2444			fda = to_binder_fd_array_object(hdr);
2445			parent = binder_validate_ptr(proc, buffer, &ptr_object,
2446						     fda->parent,
2447						     off_start_offset,
2448						     NULL,
2449						     num_valid);
2450			if (!parent) {
2451				pr_err("transaction release %d bad parent offset\n",
2452				       debug_id);
2453				continue;
2454			}
2455			fd_buf_size = sizeof(u32) * fda->num_fds;
2456			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2457				pr_err("transaction release %d invalid number of fds (%lld)\n",
2458				       debug_id, (u64)fda->num_fds);
2459				continue;
2460			}
2461			if (fd_buf_size > parent->length ||
2462			    fda->parent_offset > parent->length - fd_buf_size) {
2463				/* No space for all file descriptors here. */
2464				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2465				       debug_id, (u64)fda->num_fds);
2466				continue;
2467			}
2468			/*
2469			 * the source data for binder_buffer_object is visible
2470			 * to user-space and the @buffer element is the user
2471			 * pointer to the buffer_object containing the fd_array.
2472			 * Convert the address to an offset relative to
2473			 * the base of the transaction buffer.
2474			 */
2475			fda_offset =
2476			    (parent->buffer - (uintptr_t)buffer->user_data) +
2477			    fda->parent_offset;
2478			for (fd_index = 0; fd_index < fda->num_fds;
2479			     fd_index++) {
2480				u32 fd;
2481				int err;
2482				binder_size_t offset = fda_offset +
2483					fd_index * sizeof(fd);
2484
2485				err = binder_alloc_copy_from_buffer(
2486						&proc->alloc, &fd, buffer,
2487						offset, sizeof(fd));
2488				WARN_ON(err);
2489				if (!err) {
2490					binder_deferred_fd_close(fd);
2491					/*
2492					 * Need to make sure the thread goes
2493					 * back to userspace to complete the
2494					 * deferred close
2495					 */
2496					if (thread)
2497						thread->looper_need_return = true;
2498				}
2499			}
2500		} break;
2501		default:
2502			pr_err("transaction release %d bad object type %x\n",
2503				debug_id, hdr->type);
2504			break;
2505		}
2506	}
2507}
2508
2509/* Clean up all the objects in the buffer */
2510static inline void binder_release_entire_buffer(struct binder_proc *proc,
2511						struct binder_thread *thread,
2512						struct binder_buffer *buffer,
2513						bool is_failure)
2514{
2515	binder_size_t off_end_offset;
2516
2517	off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
2518	off_end_offset += buffer->offsets_size;
2519
2520	binder_transaction_buffer_release(proc, thread, buffer,
2521					  off_end_offset, is_failure);
2522}
2523
2524static int binder_translate_binder(struct flat_binder_object *fp,
2525				   struct binder_transaction *t,
2526				   struct binder_thread *thread)
2527{
2528	struct binder_node *node;
2529	struct binder_proc *proc = thread->proc;
2530	struct binder_proc *target_proc = t->to_proc;
2531	struct binder_ref_data rdata;
2532	int ret = 0;
2533
2534	node = binder_get_node(proc, fp->binder);
2535	if (!node) {
2536		node = binder_new_node(proc, fp);
2537		if (!node)
2538			return -ENOMEM;
2539	}
2540	if (fp->cookie != node->cookie) {
2541		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2542				  proc->pid, thread->pid, (u64)fp->binder,
2543				  node->debug_id, (u64)fp->cookie,
2544				  (u64)node->cookie);
2545		ret = -EINVAL;
2546		goto done;
2547	}
2548	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2549		ret = -EPERM;
2550		goto done;
2551	}
2552
2553	ret = binder_inc_ref_for_node(target_proc, node,
2554			fp->hdr.type == BINDER_TYPE_BINDER,
2555			&thread->todo, &rdata);
2556	if (ret)
2557		goto done;
2558
2559	if (fp->hdr.type == BINDER_TYPE_BINDER)
2560		fp->hdr.type = BINDER_TYPE_HANDLE;
2561	else
2562		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2563	fp->binder = 0;
2564	fp->handle = rdata.desc;
2565	fp->cookie = 0;
2566
2567	trace_binder_transaction_node_to_ref(t, node, &rdata);
2568	binder_debug(BINDER_DEBUG_TRANSACTION,
2569		     "        node %d u%016llx -> ref %d desc %d\n",
2570		     node->debug_id, (u64)node->ptr,
2571		     rdata.debug_id, rdata.desc);
2572done:
2573	binder_put_node(node);
2574	return ret;
2575}
2576
2577static int binder_translate_handle(struct flat_binder_object *fp,
2578				   struct binder_transaction *t,
2579				   struct binder_thread *thread)
2580{
2581	struct binder_proc *proc = thread->proc;
2582	struct binder_proc *target_proc = t->to_proc;
2583	struct binder_node *node;
2584	struct binder_ref_data src_rdata;
2585	int ret = 0;
2586
2587	node = binder_get_node_from_ref(proc, fp->handle,
2588			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2589	if (!node) {
2590		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2591				  proc->pid, thread->pid, fp->handle);
2592		return -EINVAL;
2593	}
2594	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2595		ret = -EPERM;
2596		goto done;
2597	}
2598
2599	binder_node_lock(node);
2600	if (node->proc == target_proc) {
2601		if (fp->hdr.type == BINDER_TYPE_HANDLE)
2602			fp->hdr.type = BINDER_TYPE_BINDER;
2603		else
2604			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2605		fp->binder = node->ptr;
2606		fp->cookie = node->cookie;
2607		if (node->proc)
2608			binder_inner_proc_lock(node->proc);
2609		else
2610			__acquire(&node->proc->inner_lock);
2611		binder_inc_node_nilocked(node,
2612					 fp->hdr.type == BINDER_TYPE_BINDER,
2613					 0, NULL);
2614		if (node->proc)
2615			binder_inner_proc_unlock(node->proc);
2616		else
2617			__release(&node->proc->inner_lock);
2618		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2619		binder_debug(BINDER_DEBUG_TRANSACTION,
2620			     "        ref %d desc %d -> node %d u%016llx\n",
2621			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
2622			     (u64)node->ptr);
2623		binder_node_unlock(node);
2624	} else {
2625		struct binder_ref_data dest_rdata;
2626
2627		binder_node_unlock(node);
2628		ret = binder_inc_ref_for_node(target_proc, node,
2629				fp->hdr.type == BINDER_TYPE_HANDLE,
2630				NULL, &dest_rdata);
2631		if (ret)
2632			goto done;
2633
2634		fp->binder = 0;
2635		fp->handle = dest_rdata.desc;
2636		fp->cookie = 0;
2637		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2638						    &dest_rdata);
2639		binder_debug(BINDER_DEBUG_TRANSACTION,
2640			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2641			     src_rdata.debug_id, src_rdata.desc,
2642			     dest_rdata.debug_id, dest_rdata.desc,
2643			     node->debug_id);
2644	}
2645done:
2646	binder_put_node(node);
2647	return ret;
2648}
2649
2650static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2651			       struct binder_transaction *t,
2652			       struct binder_thread *thread,
2653			       struct binder_transaction *in_reply_to)
2654{
2655	struct binder_proc *proc = thread->proc;
2656	struct binder_proc *target_proc = t->to_proc;
2657	struct binder_txn_fd_fixup *fixup;
2658	struct file *file;
2659	int ret = 0;
2660	bool target_allows_fd;
2661
2662	if (in_reply_to)
2663		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2664	else
2665		target_allows_fd = t->buffer->target_node->accept_fds;
2666	if (!target_allows_fd) {
2667		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2668				  proc->pid, thread->pid,
2669				  in_reply_to ? "reply" : "transaction",
2670				  fd);
2671		ret = -EPERM;
2672		goto err_fd_not_accepted;
2673	}
2674
2675	file = fget(fd);
2676	if (!file) {
2677		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2678				  proc->pid, thread->pid, fd);
2679		ret = -EBADF;
2680		goto err_fget;
2681	}
2682	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2683	if (ret < 0) {
2684		ret = -EPERM;
2685		goto err_security;
2686	}
2687
2688	/*
2689	 * Add fixup record for this transaction. The allocation
2690	 * of the fd in the target needs to be done from a
2691	 * target thread.
2692	 */
2693	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2694	if (!fixup) {
2695		ret = -ENOMEM;
2696		goto err_alloc;
2697	}
2698	fixup->file = file;
2699	fixup->offset = fd_offset;
2700	trace_binder_transaction_fd_send(t, fd, fixup->offset);
2701	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2702
2703	return ret;
2704
2705err_alloc:
2706err_security:
2707	fput(file);
2708err_fget:
2709err_fd_not_accepted:
2710	return ret;
2711}
2712
2713/**
2714 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2715 * @offset	offset in target buffer to fixup
2716 * @skip_size	bytes to skip in copy (fixup will be written later)
2717 * @fixup_data	data to write at fixup offset
2718 * @node	list node
2719 *
2720 * This is used for the pointer fixup list (pf) which is created and consumed
2721 * during binder_transaction() and is only accessed locally. No
2722 * locking is necessary.
2723 *
2724 * The list is ordered by @offset.
2725 */
2726struct binder_ptr_fixup {
2727	binder_size_t offset;
2728	size_t skip_size;
2729	binder_uintptr_t fixup_data;
2730	struct list_head node;
2731};
2732
2733/**
2734 * struct binder_sg_copy - scatter-gather data to be copied
2735 * @offset		offset in target buffer
2736 * @sender_uaddr	user address in source buffer
2737 * @length		bytes to copy
2738 * @node		list node
2739 *
2740 * This is used for the sg copy list (sgc) which is created and consumed
2741 * during binder_transaction() and is only accessed locally. No
2742 * locking is necessary.
2743 *
2744 * The list is ordered by @offset.
2745 */
2746struct binder_sg_copy {
2747	binder_size_t offset;
2748	const void __user *sender_uaddr;
2749	size_t length;
2750	struct list_head node;
2751};
2752
2753/**
2754 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2755 * @alloc:	binder_alloc associated with @buffer
2756 * @buffer:	binder buffer in target process
2757 * @sgc_head:	list_head of scatter-gather copy list
2758 * @pf_head:	list_head of pointer fixup list
2759 *
2760 * Processes all elements of @sgc_head, applying fixups from @pf_head
2761 * and copying the scatter-gather data from the source process' user
2762 * buffer to the target's buffer. It is expected that the list creation
2763 * and processing all occurs during binder_transaction() so these lists
2764 * are only accessed in local context.
2765 *
2766 * Return: 0=success, else -errno
2767 */
2768static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2769					 struct binder_buffer *buffer,
2770					 struct list_head *sgc_head,
2771					 struct list_head *pf_head)
2772{
2773	int ret = 0;
2774	struct binder_sg_copy *sgc, *tmpsgc;
2775	struct binder_ptr_fixup *tmppf;
2776	struct binder_ptr_fixup *pf =
2777		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2778					 node);
2779
2780	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2781		size_t bytes_copied = 0;
2782
2783		while (bytes_copied < sgc->length) {
2784			size_t copy_size;
2785			size_t bytes_left = sgc->length - bytes_copied;
2786			size_t offset = sgc->offset + bytes_copied;
2787
2788			/*
2789			 * We copy up to the fixup (pointed to by pf)
2790			 */
2791			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2792				       : bytes_left;
2793			if (!ret && copy_size)
2794				ret = binder_alloc_copy_user_to_buffer(
2795						alloc, buffer,
2796						offset,
2797						sgc->sender_uaddr + bytes_copied,
2798						copy_size);
2799			bytes_copied += copy_size;
2800			if (copy_size != bytes_left) {
2801				BUG_ON(!pf);
2802				/* we stopped at a fixup offset */
2803				if (pf->skip_size) {
2804					/*
2805					 * we are just skipping. This is for
2806					 * BINDER_TYPE_FDA where the translated
2807					 * fds will be fixed up when we get
2808					 * to target context.
2809					 */
2810					bytes_copied += pf->skip_size;
2811				} else {
2812					/* apply the fixup indicated by pf */
2813					if (!ret)
2814						ret = binder_alloc_copy_to_buffer(
2815							alloc, buffer,
2816							pf->offset,
2817							&pf->fixup_data,
2818							sizeof(pf->fixup_data));
2819					bytes_copied += sizeof(pf->fixup_data);
2820				}
2821				list_del(&pf->node);
2822				kfree(pf);
2823				pf = list_first_entry_or_null(pf_head,
2824						struct binder_ptr_fixup, node);
2825			}
2826		}
2827		list_del(&sgc->node);
2828		kfree(sgc);
2829	}
2830	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2831		BUG_ON(pf->skip_size == 0);
2832		list_del(&pf->node);
2833		kfree(pf);
2834	}
2835	BUG_ON(!list_empty(sgc_head));
2836
2837	return ret > 0 ? -EINVAL : ret;
2838}
2839
2840/**
2841 * binder_cleanup_deferred_txn_lists() - free specified lists
2842 * @sgc_head:	list_head of scatter-gather copy list
2843 * @pf_head:	list_head of pointer fixup list
2844 *
2845 * Called to clean up @sgc_head and @pf_head if there is an
2846 * error.
2847 */
2848static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2849					      struct list_head *pf_head)
2850{
2851	struct binder_sg_copy *sgc, *tmpsgc;
2852	struct binder_ptr_fixup *pf, *tmppf;
2853
2854	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2855		list_del(&sgc->node);
2856		kfree(sgc);
2857	}
2858	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2859		list_del(&pf->node);
2860		kfree(pf);
2861	}
2862}
2863
2864/**
2865 * binder_defer_copy() - queue a scatter-gather buffer for copy
2866 * @sgc_head:		list_head of scatter-gather copy list
2867 * @offset:		binder buffer offset in target process
2868 * @sender_uaddr:	user address in source process
2869 * @length:		bytes to copy
2870 *
2871 * Specify a scatter-gather block to be copied. The actual copy must
2872 * be deferred until all the needed fixups are identified and queued.
2873 * Then the copy and fixups are done together so un-translated values
2874 * from the source are never visible in the target buffer.
2875 *
2876 * We are guaranteed that repeated calls to this function will have
2877 * monotonically increasing @offset values so the list will naturally
2878 * be ordered.
2879 *
2880 * Return: 0=success, else -errno
2881 */
2882static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2883			     const void __user *sender_uaddr, size_t length)
2884{
2885	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2886
2887	if (!bc)
2888		return -ENOMEM;
2889
2890	bc->offset = offset;
2891	bc->sender_uaddr = sender_uaddr;
2892	bc->length = length;
2893	INIT_LIST_HEAD(&bc->node);
2894
2895	/*
2896	 * We are guaranteed that the deferred copies are in-order
2897	 * so just add to the tail.
2898	 */
2899	list_add_tail(&bc->node, sgc_head);
2900
2901	return 0;
2902}
2903
2904/**
2905 * binder_add_fixup() - queue a fixup to be applied to sg copy
2906 * @pf_head:	list_head of binder ptr fixup list
2907 * @offset:	binder buffer offset in target process
2908 * @fixup:	bytes to be copied for fixup
2909 * @skip_size:	bytes to skip when copying (fixup will be applied later)
2910 *
2911 * Add the specified fixup to a list ordered by @offset. When copying
2912 * the scatter-gather buffers, the fixup will be copied instead of
2913 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2914 * will be applied later (in target process context), so we just skip
2915 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
2916 * value in @fixup.
2917 *
2918 * This function is called *mostly* in @offset order, but there are
2919 * exceptions. Since out-of-order inserts are relatively uncommon,
2920 * we insert the new element by searching backward from the tail of
2921 * the list.
2922 *
2923 * Return: 0=success, else -errno
2924 */
2925static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2926			    binder_uintptr_t fixup, size_t skip_size)
2927{
2928	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2929	struct binder_ptr_fixup *tmppf;
2930
2931	if (!pf)
2932		return -ENOMEM;
2933
2934	pf->offset = offset;
2935	pf->fixup_data = fixup;
2936	pf->skip_size = skip_size;
2937	INIT_LIST_HEAD(&pf->node);
2938
2939	/* Fixups are *mostly* added in-order, but there are some
2940	 * exceptions. Look backwards through list for insertion point.
2941	 */
2942	list_for_each_entry_reverse(tmppf, pf_head, node) {
2943		if (tmppf->offset < pf->offset) {
2944			list_add(&pf->node, &tmppf->node);
2945			return 0;
2946		}
2947	}
2948	/*
2949	 * if we get here, then the new offset is the lowest so
2950	 * insert at the head
2951	 */
2952	list_add(&pf->node, pf_head);
2953	return 0;
2954}
2955
2956static int binder_translate_fd_array(struct list_head *pf_head,
2957				     struct binder_fd_array_object *fda,
2958				     const void __user *sender_ubuffer,
2959				     struct binder_buffer_object *parent,
2960				     struct binder_buffer_object *sender_uparent,
2961				     struct binder_transaction *t,
2962				     struct binder_thread *thread,
2963				     struct binder_transaction *in_reply_to)
2964{
2965	binder_size_t fdi, fd_buf_size;
2966	binder_size_t fda_offset;
2967	const void __user *sender_ufda_base;
2968	struct binder_proc *proc = thread->proc;
2969	int ret;
2970
2971	if (fda->num_fds == 0)
2972		return 0;
2973
2974	fd_buf_size = sizeof(u32) * fda->num_fds;
2975	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2976		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2977				  proc->pid, thread->pid, (u64)fda->num_fds);
2978		return -EINVAL;
2979	}
2980	if (fd_buf_size > parent->length ||
2981	    fda->parent_offset > parent->length - fd_buf_size) {
2982		/* No space for all file descriptors here. */
2983		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2984				  proc->pid, thread->pid, (u64)fda->num_fds);
2985		return -EINVAL;
2986	}
2987	/*
2988	 * the source data for binder_buffer_object is visible
2989	 * to user-space and the @buffer element is the user
2990	 * pointer to the buffer_object containing the fd_array.
2991	 * Convert the address to an offset relative to
2992	 * the base of the transaction buffer.
2993	 */
2994	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2995		fda->parent_offset;
2996	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2997				fda->parent_offset;
2998
2999	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
3000	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
3001		binder_user_error("%d:%d parent offset not aligned correctly.\n",
3002				  proc->pid, thread->pid);
3003		return -EINVAL;
3004	}
3005	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
3006	if (ret)
3007		return ret;
3008
3009	for (fdi = 0; fdi < fda->num_fds; fdi++) {
3010		u32 fd;
3011		binder_size_t offset = fda_offset + fdi * sizeof(fd);
3012		binder_size_t sender_uoffset = fdi * sizeof(fd);
3013
3014		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
3015		if (!ret)
3016			ret = binder_translate_fd(fd, offset, t, thread,
3017						  in_reply_to);
3018		if (ret)
3019			return ret > 0 ? -EINVAL : ret;
3020	}
3021	return 0;
3022}
3023
3024static int binder_fixup_parent(struct list_head *pf_head,
3025			       struct binder_transaction *t,
3026			       struct binder_thread *thread,
3027			       struct binder_buffer_object *bp,
3028			       binder_size_t off_start_offset,
3029			       binder_size_t num_valid,
3030			       binder_size_t last_fixup_obj_off,
3031			       binder_size_t last_fixup_min_off)
3032{
3033	struct binder_buffer_object *parent;
3034	struct binder_buffer *b = t->buffer;
3035	struct binder_proc *proc = thread->proc;
3036	struct binder_proc *target_proc = t->to_proc;
3037	struct binder_object object;
3038	binder_size_t buffer_offset;
3039	binder_size_t parent_offset;
3040
3041	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
3042		return 0;
3043
3044	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
3045				     off_start_offset, &parent_offset,
3046				     num_valid);
3047	if (!parent) {
3048		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3049				  proc->pid, thread->pid);
3050		return -EINVAL;
3051	}
3052
3053	if (!binder_validate_fixup(target_proc, b, off_start_offset,
3054				   parent_offset, bp->parent_offset,
3055				   last_fixup_obj_off,
3056				   last_fixup_min_off)) {
3057		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3058				  proc->pid, thread->pid);
3059		return -EINVAL;
3060	}
3061
3062	if (parent->length < sizeof(binder_uintptr_t) ||
3063	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
3064		/* No space for a pointer here! */
3065		binder_user_error("%d:%d got transaction with invalid parent offset\n",
3066				  proc->pid, thread->pid);
3067		return -EINVAL;
3068	}
3069	buffer_offset = bp->parent_offset +
3070			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
3071	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
3072}
3073
3074/**
3075 * binder_proc_transaction() - sends a transaction to a process and wakes it up
3076 * @t:		transaction to send
3077 * @proc:	process to send the transaction to
3078 * @thread:	thread in @proc to send the transaction to (may be NULL)
3079 *
3080 * This function queues a transaction to the specified process. It will try
3081 * to find a thread in the target process to handle the transaction and
3082 * wake it up. If no thread is found, the work is queued to the proc
3083 * waitqueue.
3084 *
3085 * If the @thread parameter is not NULL, the transaction is always queued
3086 * to the waitlist of that specific thread.
3087 *
3088 * Return:	true if the transactions was successfully queued
3089 *		false if the target process or thread is dead
3090 */
3091static bool binder_proc_transaction(struct binder_transaction *t,
3092				    struct binder_proc *proc,
3093				    struct binder_thread *thread)
3094{
3095	struct binder_node *node = t->buffer->target_node;
3096	bool oneway = !!(t->flags & TF_ONE_WAY);
3097	bool pending_async = false;
3098
3099	BUG_ON(!node);
3100	binder_node_lock(node);
3101	if (oneway) {
3102		BUG_ON(thread);
3103		if (node->has_async_transaction)
3104			pending_async = true;
3105		else
3106			node->has_async_transaction = true;
3107	}
3108
3109	binder_inner_proc_lock(proc);
3110
3111	if (proc->is_dead || (thread && thread->is_dead)) {
3112		binder_inner_proc_unlock(proc);
3113		binder_node_unlock(node);
3114		return false;
3115	}
3116
3117	if (!thread && !pending_async)
3118		thread = binder_select_thread_ilocked(proc);
3119
3120	if (thread)
3121		binder_enqueue_thread_work_ilocked(thread, &t->work);
3122	else if (!pending_async)
3123		binder_enqueue_work_ilocked(&t->work, &proc->todo);
3124	else
3125		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
3126
3127	if (!pending_async)
3128		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
3129
3130	binder_inner_proc_unlock(proc);
3131	binder_node_unlock(node);
3132
3133	return true;
3134}
3135
3136/**
3137 * binder_get_node_refs_for_txn() - Get required refs on node for txn
3138 * @node:         struct binder_node for which to get refs
3139 * @proc:         returns @node->proc if valid
3140 * @error:        if no @proc then returns BR_DEAD_REPLY
3141 *
3142 * User-space normally keeps the node alive when creating a transaction
3143 * since it has a reference to the target. The local strong ref keeps it
3144 * alive if the sending process dies before the target process processes
3145 * the transaction. If the source process is malicious or has a reference
3146 * counting bug, relying on the local strong ref can fail.
3147 *
3148 * Since user-space can cause the local strong ref to go away, we also take
3149 * a tmpref on the node to ensure it survives while we are constructing
3150 * the transaction. We also need a tmpref on the proc while we are
3151 * constructing the transaction, so we take that here as well.
3152 *
3153 * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
3154 * Also sets @proc if valid. If the @node->proc is NULL indicating that the
3155 * target proc has died, @error is set to BR_DEAD_REPLY
3156 */
3157static struct binder_node *binder_get_node_refs_for_txn(
3158		struct binder_node *node,
3159		struct binder_proc **procp,
3160		uint32_t *error)
3161{
3162	struct binder_node *target_node = NULL;
3163
3164	binder_node_inner_lock(node);
3165	if (node->proc) {
3166		target_node = node;
3167		binder_inc_node_nilocked(node, 1, 0, NULL);
3168		binder_inc_node_tmpref_ilocked(node);
3169		node->proc->tmp_ref++;
3170		*procp = node->proc;
3171	} else
3172		*error = BR_DEAD_REPLY;
3173	binder_node_inner_unlock(node);
3174
3175	return target_node;
3176}
3177
3178static void binder_transaction(struct binder_proc *proc,
3179			       struct binder_thread *thread,
3180			       struct binder_transaction_data *tr, int reply,
3181			       binder_size_t extra_buffers_size)
3182{
3183	int ret;
3184	struct binder_transaction *t;
3185	struct binder_work *w;
3186	struct binder_work *tcomplete;
3187	binder_size_t buffer_offset = 0;
3188	binder_size_t off_start_offset, off_end_offset;
3189	binder_size_t off_min;
3190	binder_size_t sg_buf_offset, sg_buf_end_offset;
3191	binder_size_t user_offset = 0;
3192	struct binder_proc *target_proc = NULL;
3193	struct binder_thread *target_thread = NULL;
3194	struct binder_node *target_node = NULL;
3195	struct binder_transaction *in_reply_to = NULL;
3196	struct binder_transaction_log_entry *e;
3197	uint32_t return_error = 0;
3198	uint32_t return_error_param = 0;
3199	uint32_t return_error_line = 0;
3200	binder_size_t last_fixup_obj_off = 0;
3201	binder_size_t last_fixup_min_off = 0;
3202	struct binder_context *context = proc->context;
3203	int t_debug_id = atomic_inc_return(&binder_last_id);
3204	char *secctx = NULL;
3205	u32 secctx_sz = 0;
3206	struct list_head sgc_head;
3207	struct list_head pf_head;
3208	const void __user *user_buffer = (const void __user *)
3209				(uintptr_t)tr->data.ptr.buffer;
3210	INIT_LIST_HEAD(&sgc_head);
3211	INIT_LIST_HEAD(&pf_head);
3212
3213	e = binder_transaction_log_add(&binder_transaction_log);
3214	e->debug_id = t_debug_id;
3215	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3216	e->from_proc = proc->pid;
3217	e->from_thread = thread->pid;
3218	e->target_handle = tr->target.handle;
3219	e->data_size = tr->data_size;
3220	e->offsets_size = tr->offsets_size;
3221	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
3222
3223	if (reply) {
3224		binder_inner_proc_lock(proc);
3225		in_reply_to = thread->transaction_stack;
3226		if (in_reply_to == NULL) {
3227			binder_inner_proc_unlock(proc);
3228			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
3229					  proc->pid, thread->pid);
3230			return_error = BR_FAILED_REPLY;
3231			return_error_param = -EPROTO;
3232			return_error_line = __LINE__;
3233			goto err_empty_call_stack;
3234		}
3235		if (in_reply_to->to_thread != thread) {
3236			spin_lock(&in_reply_to->lock);
3237			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
3238				proc->pid, thread->pid, in_reply_to->debug_id,
3239				in_reply_to->to_proc ?
3240				in_reply_to->to_proc->pid : 0,
3241				in_reply_to->to_thread ?
3242				in_reply_to->to_thread->pid : 0);
3243			spin_unlock(&in_reply_to->lock);
3244			binder_inner_proc_unlock(proc);
3245			return_error = BR_FAILED_REPLY;
3246			return_error_param = -EPROTO;
3247			return_error_line = __LINE__;
3248			in_reply_to = NULL;
3249			goto err_bad_call_stack;
3250		}
3251		thread->transaction_stack = in_reply_to->to_parent;
3252		binder_inner_proc_unlock(proc);
3253		binder_set_nice(in_reply_to->saved_priority);
3254		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3255		if (target_thread == NULL) {
3256			/* annotation for sparse */
3257			__release(&target_thread->proc->inner_lock);
3258			return_error = BR_DEAD_REPLY;
3259			return_error_line = __LINE__;
3260			goto err_dead_binder;
3261		}
3262		if (target_thread->transaction_stack != in_reply_to) {
3263			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
3264				proc->pid, thread->pid,
3265				target_thread->transaction_stack ?
3266				target_thread->transaction_stack->debug_id : 0,
3267				in_reply_to->debug_id);
3268			binder_inner_proc_unlock(target_thread->proc);
3269			return_error = BR_FAILED_REPLY;
3270			return_error_param = -EPROTO;
3271			return_error_line = __LINE__;
3272			in_reply_to = NULL;
3273			target_thread = NULL;
3274			goto err_dead_binder;
3275		}
3276		target_proc = target_thread->proc;
3277		target_proc->tmp_ref++;
3278		binder_inner_proc_unlock(target_thread->proc);
3279	} else {
3280		if (tr->target.handle) {
3281			struct binder_ref *ref;
3282
3283			/*
3284			 * There must already be a strong ref
3285			 * on this node. If so, do a strong
3286			 * increment on the node to ensure it
3287			 * stays alive until the transaction is
3288			 * done.
3289			 */
3290			binder_proc_lock(proc);
3291			ref = binder_get_ref_olocked(proc, tr->target.handle,
3292						     true);
3293			if (ref) {
3294				target_node = binder_get_node_refs_for_txn(
3295						ref->node, &target_proc,
3296						&return_error);
3297			} else {
3298				binder_user_error("%d:%d got transaction to invalid handle\n",
3299						  proc->pid, thread->pid);
3300				return_error = BR_FAILED_REPLY;
3301			}
3302			binder_proc_unlock(proc);
3303		} else {
3304			mutex_lock(&context->context_mgr_node_lock);
3305			target_node = context->binder_context_mgr_node;
3306			if (target_node)
3307				target_node = binder_get_node_refs_for_txn(
3308						target_node, &target_proc,
3309						&return_error);
3310			else
3311				return_error = BR_DEAD_REPLY;
3312			mutex_unlock(&context->context_mgr_node_lock);
3313			if (target_node && target_proc->pid == proc->pid) {
3314				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3315						  proc->pid, thread->pid);
3316				return_error = BR_FAILED_REPLY;
3317				return_error_param = -EINVAL;
3318				return_error_line = __LINE__;
3319				goto err_invalid_target_handle;
3320			}
3321		}
3322		if (!target_node) {
3323			/*
3324			 * return_error is set above
3325			 */
3326			return_error_param = -EINVAL;
3327			return_error_line = __LINE__;
3328			goto err_dead_binder;
3329		}
3330		e->to_node = target_node->debug_id;
3331		if (WARN_ON(proc == target_proc)) {
3332			return_error = BR_FAILED_REPLY;
3333			return_error_param = -EINVAL;
3334			return_error_line = __LINE__;
3335			goto err_invalid_target_handle;
3336		}
3337		if (security_binder_transaction(proc->cred,
3338						target_proc->cred) < 0) {
3339			return_error = BR_FAILED_REPLY;
3340			return_error_param = -EPERM;
3341			return_error_line = __LINE__;
3342			goto err_invalid_target_handle;
3343		}
3344		binder_inner_proc_lock(proc);
3345
3346		w = list_first_entry_or_null(&thread->todo,
3347					     struct binder_work, entry);
3348		if (!(tr->flags & TF_ONE_WAY) && w &&
3349		    w->type == BINDER_WORK_TRANSACTION) {
3350			/*
3351			 * Do not allow new outgoing transaction from a
3352			 * thread that has a transaction at the head of
3353			 * its todo list. Only need to check the head
3354			 * because binder_select_thread_ilocked picks a
3355			 * thread from proc->waiting_threads to enqueue
3356			 * the transaction, and nothing is queued to the
3357			 * todo list while the thread is on waiting_threads.
3358			 */
3359			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
3360					  proc->pid, thread->pid);
3361			binder_inner_proc_unlock(proc);
3362			return_error = BR_FAILED_REPLY;
3363			return_error_param = -EPROTO;
3364			return_error_line = __LINE__;
3365			goto err_bad_todo_list;
3366		}
3367
3368		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3369			struct binder_transaction *tmp;
3370
3371			tmp = thread->transaction_stack;
3372			if (tmp->to_thread != thread) {
3373				spin_lock(&tmp->lock);
3374				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3375					proc->pid, thread->pid, tmp->debug_id,
3376					tmp->to_proc ? tmp->to_proc->pid : 0,
3377					tmp->to_thread ?
3378					tmp->to_thread->pid : 0);
3379				spin_unlock(&tmp->lock);
3380				binder_inner_proc_unlock(proc);
3381				return_error = BR_FAILED_REPLY;
3382				return_error_param = -EPROTO;
3383				return_error_line = __LINE__;
3384				goto err_bad_call_stack;
3385			}
3386			while (tmp) {
3387				struct binder_thread *from;
3388
3389				spin_lock(&tmp->lock);
3390				from = tmp->from;
3391				if (from && from->proc == target_proc) {
3392					atomic_inc(&from->tmp_ref);
3393					target_thread = from;
3394					spin_unlock(&tmp->lock);
3395					break;
3396				}
3397				spin_unlock(&tmp->lock);
3398				tmp = tmp->from_parent;
3399			}
3400		}
3401		binder_inner_proc_unlock(proc);
3402	}
3403	if (target_thread)
3404		e->to_thread = target_thread->pid;
3405	e->to_proc = target_proc->pid;
3406
3407	/* TODO: reuse incoming transaction for reply */
3408	t = kzalloc(sizeof(*t), GFP_KERNEL);
3409	if (t == NULL) {
3410		return_error = BR_FAILED_REPLY;
3411		return_error_param = -ENOMEM;
3412		return_error_line = __LINE__;
3413		goto err_alloc_t_failed;
3414	}
3415	INIT_LIST_HEAD(&t->fd_fixups);
3416	binder_stats_created(BINDER_STAT_TRANSACTION);
3417	spin_lock_init(&t->lock);
3418
3419	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3420	if (tcomplete == NULL) {
3421		return_error = BR_FAILED_REPLY;
3422		return_error_param = -ENOMEM;
3423		return_error_line = __LINE__;
3424		goto err_alloc_tcomplete_failed;
3425	}
3426	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3427
3428	t->debug_id = t_debug_id;
3429
3430	if (reply)
3431		binder_debug(BINDER_DEBUG_TRANSACTION,
3432			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3433			     proc->pid, thread->pid, t->debug_id,
3434			     target_proc->pid, target_thread->pid,
3435			     (u64)tr->data.ptr.buffer,
3436			     (u64)tr->data.ptr.offsets,
3437			     (u64)tr->data_size, (u64)tr->offsets_size,
3438			     (u64)extra_buffers_size);
3439	else
3440		binder_debug(BINDER_DEBUG_TRANSACTION,
3441			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3442			     proc->pid, thread->pid, t->debug_id,
3443			     target_proc->pid, target_node->debug_id,
3444			     (u64)tr->data.ptr.buffer,
3445			     (u64)tr->data.ptr.offsets,
3446			     (u64)tr->data_size, (u64)tr->offsets_size,
3447			     (u64)extra_buffers_size);
3448
3449	if (!reply && !(tr->flags & TF_ONE_WAY)) {
3450		t->from = thread;
3451#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3452		t->async_from_pid = -1;
3453		t->async_from_tid = -1;
3454#endif
3455	} else {
3456		t->from = NULL;
3457#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3458		t->async_from_pid = thread->proc->pid;
3459		t->async_from_tid = thread->pid;
3460#endif
3461}
3462	t->sender_euid = task_euid(proc->tsk);
3463#ifdef CONFIG_ACCESS_TOKENID
3464	t->sender_tokenid = current->token;
3465	t->first_tokenid = current->ftoken;
3466#endif /* CONFIG_ACCESS_TOKENID */
3467	t->to_proc = target_proc;
3468	t->to_thread = target_thread;
3469	t->code = tr->code;
3470	t->flags = tr->flags;
3471	t->priority = task_nice(current);
3472
3473	if (target_node && target_node->txn_security_ctx) {
3474		u32 secid;
3475		size_t added_size;
3476
3477		security_cred_getsecid(proc->cred, &secid);
3478		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3479		if (ret) {
3480			return_error = BR_FAILED_REPLY;
3481			return_error_param = ret;
3482			return_error_line = __LINE__;
3483			goto err_get_secctx_failed;
3484		}
3485		added_size = ALIGN(secctx_sz, sizeof(u64));
3486		extra_buffers_size += added_size;
3487		if (extra_buffers_size < added_size) {
3488			/* integer overflow of extra_buffers_size */
3489			return_error = BR_FAILED_REPLY;
3490			return_error_param = EINVAL;
3491			return_error_line = __LINE__;
3492			goto err_bad_extra_size;
3493		}
3494	}
3495
3496	trace_binder_transaction(reply, t, target_node);
3497
3498	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3499		tr->offsets_size, extra_buffers_size,
3500		!reply && (t->flags & TF_ONE_WAY), current->tgid);
3501	if (IS_ERR(t->buffer)) {
3502		/*
3503		 * -ESRCH indicates VMA cleared. The target is dying.
3504		 */
3505		return_error_param = PTR_ERR(t->buffer);
3506		return_error = return_error_param == -ESRCH ?
3507			BR_DEAD_REPLY : BR_FAILED_REPLY;
3508		return_error_line = __LINE__;
3509		t->buffer = NULL;
3510		goto err_binder_alloc_buf_failed;
3511	}
3512	if (secctx) {
3513		int err;
3514		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3515				    ALIGN(tr->offsets_size, sizeof(void *)) +
3516				    ALIGN(extra_buffers_size, sizeof(void *)) -
3517				    ALIGN(secctx_sz, sizeof(u64));
3518
3519		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3520		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3521						  t->buffer, buf_offset,
3522						  secctx, secctx_sz);
3523		if (err) {
3524			t->security_ctx = 0;
3525			WARN_ON(1);
3526		}
3527		security_release_secctx(secctx, secctx_sz);
3528		secctx = NULL;
3529	}
3530	t->buffer->debug_id = t->debug_id;
3531	t->buffer->transaction = t;
3532	t->buffer->target_node = target_node;
3533	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3534	trace_binder_transaction_alloc_buf(t->buffer);
3535
3536	if (binder_alloc_copy_user_to_buffer(
3537				&target_proc->alloc,
3538				t->buffer,
3539				ALIGN(tr->data_size, sizeof(void *)),
3540				(const void __user *)
3541					(uintptr_t)tr->data.ptr.offsets,
3542				tr->offsets_size)) {
3543		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3544				proc->pid, thread->pid);
3545		return_error = BR_FAILED_REPLY;
3546		return_error_param = -EFAULT;
3547		return_error_line = __LINE__;
3548		goto err_copy_data_failed;
3549	}
3550	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3551		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3552				proc->pid, thread->pid, (u64)tr->offsets_size);
3553		return_error = BR_FAILED_REPLY;
3554		return_error_param = -EINVAL;
3555		return_error_line = __LINE__;
3556		goto err_bad_offset;
3557	}
3558	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3559		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3560				  proc->pid, thread->pid,
3561				  (u64)extra_buffers_size);
3562		return_error = BR_FAILED_REPLY;
3563		return_error_param = -EINVAL;
3564		return_error_line = __LINE__;
3565		goto err_bad_offset;
3566	}
3567	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3568	buffer_offset = off_start_offset;
3569	off_end_offset = off_start_offset + tr->offsets_size;
3570	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3571	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3572		ALIGN(secctx_sz, sizeof(u64));
3573	off_min = 0;
3574	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3575	     buffer_offset += sizeof(binder_size_t)) {
3576		struct binder_object_header *hdr;
3577		size_t object_size;
3578		struct binder_object object;
3579		binder_size_t object_offset;
3580		binder_size_t copy_size;
3581
3582		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3583						  &object_offset,
3584						  t->buffer,
3585						  buffer_offset,
3586						  sizeof(object_offset))) {
3587			return_error = BR_FAILED_REPLY;
3588			return_error_param = -EINVAL;
3589			return_error_line = __LINE__;
3590			goto err_bad_offset;
3591		}
3592
3593		/*
3594		 * Copy the source user buffer up to the next object
3595		 * that will be processed.
3596		 */
3597		copy_size = object_offset - user_offset;
3598		if (copy_size && (user_offset > object_offset ||
3599				object_offset > tr->data_size ||
3600				binder_alloc_copy_user_to_buffer(
3601					&target_proc->alloc,
3602					t->buffer, user_offset,
3603					user_buffer + user_offset,
3604					copy_size))) {
3605			binder_user_error("%d:%d got transaction with invalid data ptr\n",
3606					proc->pid, thread->pid);
3607			return_error = BR_FAILED_REPLY;
3608			return_error_param = -EFAULT;
3609			return_error_line = __LINE__;
3610			goto err_copy_data_failed;
3611		}
3612		object_size = binder_get_object(target_proc, user_buffer,
3613				t->buffer, object_offset, &object);
3614		if (object_size == 0 || object_offset < off_min) {
3615			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3616					  proc->pid, thread->pid,
3617					  (u64)object_offset,
3618					  (u64)off_min,
3619					  (u64)t->buffer->data_size);
3620			return_error = BR_FAILED_REPLY;
3621			return_error_param = -EINVAL;
3622			return_error_line = __LINE__;
3623			goto err_bad_offset;
3624		}
3625		/*
3626		 * Set offset to the next buffer fragment to be
3627		 * copied
3628		 */
3629		user_offset = object_offset + object_size;
3630
3631		hdr = &object.hdr;
3632		off_min = object_offset + object_size;
3633		switch (hdr->type) {
3634		case BINDER_TYPE_BINDER:
3635		case BINDER_TYPE_WEAK_BINDER: {
3636			struct flat_binder_object *fp;
3637
3638			fp = to_flat_binder_object(hdr);
3639			ret = binder_translate_binder(fp, t, thread);
3640
3641			if (ret < 0 ||
3642			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3643							t->buffer,
3644							object_offset,
3645							fp, sizeof(*fp))) {
3646				return_error = BR_FAILED_REPLY;
3647				return_error_param = ret;
3648				return_error_line = __LINE__;
3649				goto err_translate_failed;
3650			}
3651		} break;
3652		case BINDER_TYPE_HANDLE:
3653		case BINDER_TYPE_WEAK_HANDLE: {
3654			struct flat_binder_object *fp;
3655
3656			fp = to_flat_binder_object(hdr);
3657			ret = binder_translate_handle(fp, t, thread);
3658			if (ret < 0 ||
3659			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3660							t->buffer,
3661							object_offset,
3662							fp, sizeof(*fp))) {
3663				return_error = BR_FAILED_REPLY;
3664				return_error_param = ret;
3665				return_error_line = __LINE__;
3666				goto err_translate_failed;
3667			}
3668		} break;
3669
3670		case BINDER_TYPE_FD: {
3671			struct binder_fd_object *fp = to_binder_fd_object(hdr);
3672			binder_size_t fd_offset = object_offset +
3673				(uintptr_t)&fp->fd - (uintptr_t)fp;
3674			int ret = binder_translate_fd(fp->fd, fd_offset, t,
3675						      thread, in_reply_to);
3676
3677			fp->pad_binder = 0;
3678			if (ret < 0 ||
3679			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3680							t->buffer,
3681							object_offset,
3682							fp, sizeof(*fp))) {
3683				return_error = BR_FAILED_REPLY;
3684				return_error_param = ret;
3685				return_error_line = __LINE__;
3686				goto err_translate_failed;
3687			}
3688		} break;
3689		case BINDER_TYPE_FDA: {
3690			struct binder_object ptr_object;
3691			binder_size_t parent_offset;
3692			struct binder_object user_object;
3693			size_t user_parent_size;
3694			struct binder_fd_array_object *fda =
3695				to_binder_fd_array_object(hdr);
3696			size_t num_valid = (buffer_offset - off_start_offset) /
3697						sizeof(binder_size_t);
3698			struct binder_buffer_object *parent =
3699				binder_validate_ptr(target_proc, t->buffer,
3700						    &ptr_object, fda->parent,
3701						    off_start_offset,
3702						    &parent_offset,
3703						    num_valid);
3704			if (!parent) {
3705				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3706						  proc->pid, thread->pid);
3707				return_error = BR_FAILED_REPLY;
3708				return_error_param = -EINVAL;
3709				return_error_line = __LINE__;
3710				goto err_bad_parent;
3711			}
3712			if (!binder_validate_fixup(target_proc, t->buffer,
3713						   off_start_offset,
3714						   parent_offset,
3715						   fda->parent_offset,
3716						   last_fixup_obj_off,
3717						   last_fixup_min_off)) {
3718				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3719						  proc->pid, thread->pid);
3720				return_error = BR_FAILED_REPLY;
3721				return_error_param = -EINVAL;
3722				return_error_line = __LINE__;
3723				goto err_bad_parent;
3724			}
3725			/*
3726			 * We need to read the user version of the parent
3727			 * object to get the original user offset
3728			 */
3729			user_parent_size =
3730				binder_get_object(proc, user_buffer, t->buffer,
3731						  parent_offset, &user_object);
3732			if (user_parent_size != sizeof(user_object.bbo)) {
3733				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3734						  proc->pid, thread->pid,
3735						  user_parent_size,
3736						  sizeof(user_object.bbo));
3737				return_error = BR_FAILED_REPLY;
3738				return_error_param = -EINVAL;
3739				return_error_line = __LINE__;
3740				goto err_bad_parent;
3741			}
3742			ret = binder_translate_fd_array(&pf_head, fda,
3743							user_buffer, parent,
3744							&user_object.bbo, t,
3745							thread, in_reply_to);
3746			if (!ret)
3747				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3748								  t->buffer,
3749								  object_offset,
3750								  fda, sizeof(*fda));
3751			if (ret) {
3752				return_error = BR_FAILED_REPLY;
3753				return_error_param = ret > 0 ? -EINVAL : ret;
3754				return_error_line = __LINE__;
3755				goto err_translate_failed;
3756			}
3757			last_fixup_obj_off = parent_offset;
3758			last_fixup_min_off =
3759				fda->parent_offset + sizeof(u32) * fda->num_fds;
3760		} break;
3761		case BINDER_TYPE_PTR: {
3762			struct binder_buffer_object *bp =
3763				to_binder_buffer_object(hdr);
3764			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3765			size_t num_valid;
3766
3767			if (bp->length > buf_left) {
3768				binder_user_error("%d:%d got transaction with too large buffer\n",
3769						  proc->pid, thread->pid);
3770				return_error = BR_FAILED_REPLY;
3771				return_error_param = -EINVAL;
3772				return_error_line = __LINE__;
3773				goto err_bad_offset;
3774			}
3775			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3776				(const void __user *)(uintptr_t)bp->buffer,
3777				bp->length);
3778			if (ret) {
3779				return_error = BR_FAILED_REPLY;
3780				return_error_param = ret;
3781				return_error_line = __LINE__;
3782				goto err_translate_failed;
3783			}
3784			/* Fixup buffer pointer to target proc address space */
3785			bp->buffer = (uintptr_t)
3786				t->buffer->user_data + sg_buf_offset;
3787			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3788
3789			num_valid = (buffer_offset - off_start_offset) /
3790					sizeof(binder_size_t);
3791			ret = binder_fixup_parent(&pf_head, t,
3792						  thread, bp,
3793						  off_start_offset,
3794						  num_valid,
3795						  last_fixup_obj_off,
3796						  last_fixup_min_off);
3797			if (ret < 0 ||
3798			    binder_alloc_copy_to_buffer(&target_proc->alloc,
3799							t->buffer,
3800							object_offset,
3801							bp, sizeof(*bp))) {
3802				return_error = BR_FAILED_REPLY;
3803				return_error_param = ret;
3804				return_error_line = __LINE__;
3805				goto err_translate_failed;
3806			}
3807			last_fixup_obj_off = object_offset;
3808			last_fixup_min_off = 0;
3809		} break;
3810		default:
3811			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3812				proc->pid, thread->pid, hdr->type);
3813			return_error = BR_FAILED_REPLY;
3814			return_error_param = -EINVAL;
3815			return_error_line = __LINE__;
3816			goto err_bad_object_type;
3817		}
3818	}
3819	/* Done processing objects, copy the rest of the buffer */
3820	if (binder_alloc_copy_user_to_buffer(
3821				&target_proc->alloc,
3822				t->buffer, user_offset,
3823				user_buffer + user_offset,
3824				tr->data_size - user_offset)) {
3825		binder_user_error("%d:%d got transaction with invalid data ptr\n",
3826				proc->pid, thread->pid);
3827		return_error = BR_FAILED_REPLY;
3828		return_error_param = -EFAULT;
3829		return_error_line = __LINE__;
3830		goto err_copy_data_failed;
3831	}
3832
3833	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3834					    &sgc_head, &pf_head);
3835	if (ret) {
3836		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3837				  proc->pid, thread->pid);
3838		return_error = BR_FAILED_REPLY;
3839		return_error_param = ret;
3840		return_error_line = __LINE__;
3841		goto err_copy_data_failed;
3842	}
3843	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3844	t->work.type = BINDER_WORK_TRANSACTION;
3845
3846	if (reply) {
3847		binder_enqueue_thread_work(thread, tcomplete);
3848		binder_inner_proc_lock(target_proc);
3849		if (target_thread->is_dead) {
3850			binder_inner_proc_unlock(target_proc);
3851			goto err_dead_proc_or_thread;
3852		}
3853		BUG_ON(t->buffer->async_transaction != 0);
3854#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3855		t->timestamp = in_reply_to->timestamp;
3856#endif
3857		binder_pop_transaction_ilocked(target_thread, in_reply_to);
3858		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3859		binder_inner_proc_unlock(target_proc);
3860		wake_up_interruptible_sync(&target_thread->wait);
3861		binder_free_transaction(in_reply_to);
3862	} else if (!(t->flags & TF_ONE_WAY)) {
3863		BUG_ON(t->buffer->async_transaction != 0);
3864		binder_inner_proc_lock(proc);
3865		/*
3866		 * Defer the TRANSACTION_COMPLETE, so we don't return to
3867		 * userspace immediately; this allows the target process to
3868		 * immediately start processing this transaction, reducing
3869		 * latency. We will then return the TRANSACTION_COMPLETE when
3870		 * the target replies (or there is an error).
3871		 */
3872		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3873		t->need_reply = 1;
3874		t->from_parent = thread->transaction_stack;
3875		thread->transaction_stack = t;
3876#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3877		t->timestamp = binder_clock();
3878#endif
3879		binder_inner_proc_unlock(proc);
3880		if (!binder_proc_transaction(t, target_proc, target_thread)) {
3881			binder_inner_proc_lock(proc);
3882			binder_pop_transaction_ilocked(thread, t);
3883			binder_inner_proc_unlock(proc);
3884			goto err_dead_proc_or_thread;
3885		}
3886	} else {
3887		BUG_ON(target_node == NULL);
3888		BUG_ON(t->buffer->async_transaction != 1);
3889		binder_enqueue_thread_work(thread, tcomplete);
3890#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
3891		t->timestamp = binder_clock();
3892#endif
3893		if (!binder_proc_transaction(t, target_proc, NULL))
3894			goto err_dead_proc_or_thread;
3895	}
3896	if (target_thread)
3897		binder_thread_dec_tmpref(target_thread);
3898	binder_proc_dec_tmpref(target_proc);
3899	if (target_node)
3900		binder_dec_node_tmpref(target_node);
3901	/*
3902	 * write barrier to synchronize with initialization
3903	 * of log entry
3904	 */
3905	smp_wmb();
3906	WRITE_ONCE(e->debug_id_done, t_debug_id);
3907	return;
3908
3909err_dead_proc_or_thread:
3910	return_error = BR_DEAD_REPLY;
3911	return_error_line = __LINE__;
3912	binder_dequeue_work(proc, tcomplete);
3913err_translate_failed:
3914err_bad_object_type:
3915err_bad_offset:
3916err_bad_parent:
3917err_copy_data_failed:
3918	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3919	binder_free_txn_fixups(t);
3920	trace_binder_transaction_failed_buffer_release(t->buffer);
3921	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3922					  buffer_offset, true);
3923	if (target_node)
3924		binder_dec_node_tmpref(target_node);
3925	target_node = NULL;
3926	t->buffer->transaction = NULL;
3927	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3928err_binder_alloc_buf_failed:
3929err_bad_extra_size:
3930	if (secctx)
3931		security_release_secctx(secctx, secctx_sz);
3932err_get_secctx_failed:
3933	kfree(tcomplete);
3934	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3935err_alloc_tcomplete_failed:
3936	kfree(t);
3937	binder_stats_deleted(BINDER_STAT_TRANSACTION);
3938err_alloc_t_failed:
3939err_bad_todo_list:
3940err_bad_call_stack:
3941err_empty_call_stack:
3942err_dead_binder:
3943err_invalid_target_handle:
3944	if (target_thread)
3945		binder_thread_dec_tmpref(target_thread);
3946	if (target_proc)
3947		binder_proc_dec_tmpref(target_proc);
3948	if (target_node) {
3949		binder_dec_node(target_node, 1, 0);
3950		binder_dec_node_tmpref(target_node);
3951	}
3952
3953	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3954		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3955		     proc->pid, thread->pid, return_error, return_error_param,
3956		     (u64)tr->data_size, (u64)tr->offsets_size,
3957		     return_error_line);
3958
3959	{
3960		struct binder_transaction_log_entry *fe;
3961
3962		e->return_error = return_error;
3963		e->return_error_param = return_error_param;
3964		e->return_error_line = return_error_line;
3965		fe = binder_transaction_log_add(&binder_transaction_log_failed);
3966		*fe = *e;
3967		/*
3968		 * write barrier to synchronize with initialization
3969		 * of log entry
3970		 */
3971		smp_wmb();
3972		WRITE_ONCE(e->debug_id_done, t_debug_id);
3973		WRITE_ONCE(fe->debug_id_done, t_debug_id);
3974	}
3975
3976	BUG_ON(thread->return_error.cmd != BR_OK);
3977	if (in_reply_to) {
3978		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3979		binder_enqueue_thread_work(thread, &thread->return_error.work);
3980		binder_send_failed_reply(in_reply_to, return_error);
3981	} else {
3982		thread->return_error.cmd = return_error;
3983		binder_enqueue_thread_work(thread, &thread->return_error.work);
3984	}
3985}
3986
3987/**
3988 * binder_free_buf() - free the specified buffer
3989 * @proc:	binder proc that owns buffer
3990 * @buffer:	buffer to be freed
3991 * @is_failure:	failed to send transaction
3992 *
3993 * If buffer for an async transaction, enqueue the next async
3994 * transaction from the node.
3995 *
3996 * Cleanup buffer and free it.
3997 */
3998static void
3999binder_free_buf(struct binder_proc *proc,
4000		struct binder_thread *thread,
4001		struct binder_buffer *buffer, bool is_failure)
4002{
4003	binder_inner_proc_lock(proc);
4004	if (buffer->transaction) {
4005		buffer->transaction->buffer = NULL;
4006		buffer->transaction = NULL;
4007	}
4008	binder_inner_proc_unlock(proc);
4009	if (buffer->async_transaction && buffer->target_node) {
4010		struct binder_node *buf_node;
4011		struct binder_work *w;
4012
4013		buf_node = buffer->target_node;
4014		binder_node_inner_lock(buf_node);
4015		BUG_ON(!buf_node->has_async_transaction);
4016		BUG_ON(buf_node->proc != proc);
4017		w = binder_dequeue_work_head_ilocked(
4018				&buf_node->async_todo);
4019		if (!w) {
4020			buf_node->has_async_transaction = false;
4021		} else {
4022			binder_enqueue_work_ilocked(
4023					w, &proc->todo);
4024			binder_wakeup_proc_ilocked(proc);
4025		}
4026		binder_node_inner_unlock(buf_node);
4027	}
4028	trace_binder_transaction_buffer_release(buffer);
4029	binder_release_entire_buffer(proc, thread, buffer, is_failure);
4030	binder_alloc_free_buf(&proc->alloc, buffer);
4031}
4032
4033static int binder_thread_write(struct binder_proc *proc,
4034			struct binder_thread *thread,
4035			binder_uintptr_t binder_buffer, size_t size,
4036			binder_size_t *consumed)
4037{
4038	uint32_t cmd;
4039	struct binder_context *context = proc->context;
4040	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4041	void __user *ptr = buffer + *consumed;
4042	void __user *end = buffer + size;
4043
4044	while (ptr < end && thread->return_error.cmd == BR_OK) {
4045		int ret;
4046
4047		if (get_user(cmd, (uint32_t __user *)ptr))
4048			return -EFAULT;
4049		ptr += sizeof(uint32_t);
4050		trace_binder_command(cmd);
4051		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
4052			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
4053			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
4054			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
4055		}
4056		switch (cmd) {
4057		case BC_INCREFS:
4058		case BC_ACQUIRE:
4059		case BC_RELEASE:
4060		case BC_DECREFS: {
4061			uint32_t target;
4062			const char *debug_string;
4063			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
4064			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
4065			struct binder_ref_data rdata;
4066
4067			if (get_user(target, (uint32_t __user *)ptr))
4068				return -EFAULT;
4069
4070			ptr += sizeof(uint32_t);
4071			ret = -1;
4072			if (increment && !target) {
4073				struct binder_node *ctx_mgr_node;
4074				mutex_lock(&context->context_mgr_node_lock);
4075				ctx_mgr_node = context->binder_context_mgr_node;
4076				if (ctx_mgr_node) {
4077					if (ctx_mgr_node->proc == proc) {
4078						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
4079								  proc->pid, thread->pid);
4080						mutex_unlock(&context->context_mgr_node_lock);
4081						return -EINVAL;
4082					}
4083					ret = binder_inc_ref_for_node(
4084							proc, ctx_mgr_node,
4085							strong, NULL, &rdata);
4086				}
4087				mutex_unlock(&context->context_mgr_node_lock);
4088			}
4089			if (ret)
4090				ret = binder_update_ref_for_handle(
4091						proc, target, increment, strong,
4092						&rdata);
4093			if (!ret && rdata.desc != target) {
4094				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
4095					proc->pid, thread->pid,
4096					target, rdata.desc);
4097			}
4098			switch (cmd) {
4099			case BC_INCREFS:
4100				debug_string = "IncRefs";
4101				break;
4102			case BC_ACQUIRE:
4103				debug_string = "Acquire";
4104				break;
4105			case BC_RELEASE:
4106				debug_string = "Release";
4107				break;
4108			case BC_DECREFS:
4109			default:
4110				debug_string = "DecRefs";
4111				break;
4112			}
4113			if (ret) {
4114				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
4115					proc->pid, thread->pid, debug_string,
4116					strong, target, ret);
4117				break;
4118			}
4119			binder_debug(BINDER_DEBUG_USER_REFS,
4120				     "%d:%d %s ref %d desc %d s %d w %d\n",
4121				     proc->pid, thread->pid, debug_string,
4122				     rdata.debug_id, rdata.desc, rdata.strong,
4123				     rdata.weak);
4124			break;
4125		}
4126		case BC_INCREFS_DONE:
4127		case BC_ACQUIRE_DONE: {
4128			binder_uintptr_t node_ptr;
4129			binder_uintptr_t cookie;
4130			struct binder_node *node;
4131			bool free_node;
4132
4133			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
4134				return -EFAULT;
4135			ptr += sizeof(binder_uintptr_t);
4136			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4137				return -EFAULT;
4138			ptr += sizeof(binder_uintptr_t);
4139			node = binder_get_node(proc, node_ptr);
4140			if (node == NULL) {
4141				binder_user_error("%d:%d %s u%016llx no match\n",
4142					proc->pid, thread->pid,
4143					cmd == BC_INCREFS_DONE ?
4144					"BC_INCREFS_DONE" :
4145					"BC_ACQUIRE_DONE",
4146					(u64)node_ptr);
4147				break;
4148			}
4149			if (cookie != node->cookie) {
4150				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
4151					proc->pid, thread->pid,
4152					cmd == BC_INCREFS_DONE ?
4153					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4154					(u64)node_ptr, node->debug_id,
4155					(u64)cookie, (u64)node->cookie);
4156				binder_put_node(node);
4157				break;
4158			}
4159			binder_node_inner_lock(node);
4160			if (cmd == BC_ACQUIRE_DONE) {
4161				if (node->pending_strong_ref == 0) {
4162					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
4163						proc->pid, thread->pid,
4164						node->debug_id);
4165					binder_node_inner_unlock(node);
4166					binder_put_node(node);
4167					break;
4168				}
4169				node->pending_strong_ref = 0;
4170			} else {
4171				if (node->pending_weak_ref == 0) {
4172					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
4173						proc->pid, thread->pid,
4174						node->debug_id);
4175					binder_node_inner_unlock(node);
4176					binder_put_node(node);
4177					break;
4178				}
4179				node->pending_weak_ref = 0;
4180			}
4181			free_node = binder_dec_node_nilocked(node,
4182					cmd == BC_ACQUIRE_DONE, 0);
4183			WARN_ON(free_node);
4184			binder_debug(BINDER_DEBUG_USER_REFS,
4185				     "%d:%d %s node %d ls %d lw %d tr %d\n",
4186				     proc->pid, thread->pid,
4187				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
4188				     node->debug_id, node->local_strong_refs,
4189				     node->local_weak_refs, node->tmp_refs);
4190			binder_node_inner_unlock(node);
4191			binder_put_node(node);
4192			break;
4193		}
4194		case BC_ATTEMPT_ACQUIRE:
4195			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
4196			return -EINVAL;
4197		case BC_ACQUIRE_RESULT:
4198			pr_err("BC_ACQUIRE_RESULT not supported\n");
4199			return -EINVAL;
4200
4201		case BC_FREE_BUFFER: {
4202			binder_uintptr_t data_ptr;
4203			struct binder_buffer *buffer;
4204
4205			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
4206				return -EFAULT;
4207			ptr += sizeof(binder_uintptr_t);
4208
4209			buffer = binder_alloc_prepare_to_free(&proc->alloc,
4210							      data_ptr);
4211			if (IS_ERR_OR_NULL(buffer)) {
4212				if (PTR_ERR(buffer) == -EPERM) {
4213					binder_user_error(
4214						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
4215						proc->pid, thread->pid,
4216						(u64)data_ptr);
4217				} else {
4218					binder_user_error(
4219						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
4220						proc->pid, thread->pid,
4221						(u64)data_ptr);
4222				}
4223				break;
4224			}
4225			binder_debug(BINDER_DEBUG_FREE_BUFFER,
4226				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
4227				     proc->pid, thread->pid, (u64)data_ptr,
4228				     buffer->debug_id,
4229				     buffer->transaction ? "active" : "finished");
4230			binder_free_buf(proc, thread, buffer, false);
4231			break;
4232		}
4233
4234		case BC_TRANSACTION_SG:
4235		case BC_REPLY_SG: {
4236			struct binder_transaction_data_sg tr;
4237
4238			if (copy_from_user(&tr, ptr, sizeof(tr)))
4239				return -EFAULT;
4240			ptr += sizeof(tr);
4241			binder_transaction(proc, thread, &tr.transaction_data,
4242					   cmd == BC_REPLY_SG, tr.buffers_size);
4243			break;
4244		}
4245		case BC_TRANSACTION:
4246		case BC_REPLY: {
4247			struct binder_transaction_data tr;
4248
4249			if (copy_from_user(&tr, ptr, sizeof(tr)))
4250				return -EFAULT;
4251			ptr += sizeof(tr);
4252			binder_transaction(proc, thread, &tr,
4253					   cmd == BC_REPLY, 0);
4254			break;
4255		}
4256
4257		case BC_REGISTER_LOOPER:
4258			binder_debug(BINDER_DEBUG_THREADS,
4259				     "%d:%d BC_REGISTER_LOOPER\n",
4260				     proc->pid, thread->pid);
4261			binder_inner_proc_lock(proc);
4262			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
4263				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4264				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
4265					proc->pid, thread->pid);
4266			} else if (proc->requested_threads == 0) {
4267				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4268				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
4269					proc->pid, thread->pid);
4270			} else {
4271				proc->requested_threads--;
4272				proc->requested_threads_started++;
4273			}
4274			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
4275			binder_inner_proc_unlock(proc);
4276			break;
4277		case BC_ENTER_LOOPER:
4278			binder_debug(BINDER_DEBUG_THREADS,
4279				     "%d:%d BC_ENTER_LOOPER\n",
4280				     proc->pid, thread->pid);
4281			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
4282				thread->looper |= BINDER_LOOPER_STATE_INVALID;
4283				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
4284					proc->pid, thread->pid);
4285			}
4286			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
4287			break;
4288		case BC_EXIT_LOOPER:
4289			binder_debug(BINDER_DEBUG_THREADS,
4290				     "%d:%d BC_EXIT_LOOPER\n",
4291				     proc->pid, thread->pid);
4292			thread->looper |= BINDER_LOOPER_STATE_EXITED;
4293			break;
4294
4295		case BC_REQUEST_DEATH_NOTIFICATION:
4296		case BC_CLEAR_DEATH_NOTIFICATION: {
4297			uint32_t target;
4298			binder_uintptr_t cookie;
4299			struct binder_ref *ref;
4300			struct binder_ref_death *death = NULL;
4301
4302			if (get_user(target, (uint32_t __user *)ptr))
4303				return -EFAULT;
4304			ptr += sizeof(uint32_t);
4305			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4306				return -EFAULT;
4307			ptr += sizeof(binder_uintptr_t);
4308			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4309				/*
4310				 * Allocate memory for death notification
4311				 * before taking lock
4312				 */
4313				death = kzalloc(sizeof(*death), GFP_KERNEL);
4314				if (death == NULL) {
4315					WARN_ON(thread->return_error.cmd !=
4316						BR_OK);
4317					thread->return_error.cmd = BR_ERROR;
4318					binder_enqueue_thread_work(
4319						thread,
4320						&thread->return_error.work);
4321					binder_debug(
4322						BINDER_DEBUG_FAILED_TRANSACTION,
4323						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
4324						proc->pid, thread->pid);
4325					break;
4326				}
4327			}
4328			binder_proc_lock(proc);
4329			ref = binder_get_ref_olocked(proc, target, false);
4330			if (ref == NULL) {
4331				binder_user_error("%d:%d %s invalid ref %d\n",
4332					proc->pid, thread->pid,
4333					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4334					"BC_REQUEST_DEATH_NOTIFICATION" :
4335					"BC_CLEAR_DEATH_NOTIFICATION",
4336					target);
4337				binder_proc_unlock(proc);
4338				kfree(death);
4339				break;
4340			}
4341
4342			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4343				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
4344				     proc->pid, thread->pid,
4345				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
4346				     "BC_REQUEST_DEATH_NOTIFICATION" :
4347				     "BC_CLEAR_DEATH_NOTIFICATION",
4348				     (u64)cookie, ref->data.debug_id,
4349				     ref->data.desc, ref->data.strong,
4350				     ref->data.weak, ref->node->debug_id);
4351
4352			binder_node_lock(ref->node);
4353			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
4354				if (ref->death) {
4355					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
4356						proc->pid, thread->pid);
4357					binder_node_unlock(ref->node);
4358					binder_proc_unlock(proc);
4359					kfree(death);
4360					break;
4361				}
4362				binder_stats_created(BINDER_STAT_DEATH);
4363				INIT_LIST_HEAD(&death->work.entry);
4364				death->cookie = cookie;
4365				ref->death = death;
4366				if (ref->node->proc == NULL) {
4367					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4368
4369					binder_inner_proc_lock(proc);
4370					binder_enqueue_work_ilocked(
4371						&ref->death->work, &proc->todo);
4372					binder_wakeup_proc_ilocked(proc);
4373					binder_inner_proc_unlock(proc);
4374				}
4375			} else {
4376				if (ref->death == NULL) {
4377					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
4378						proc->pid, thread->pid);
4379					binder_node_unlock(ref->node);
4380					binder_proc_unlock(proc);
4381					break;
4382				}
4383				death = ref->death;
4384				if (death->cookie != cookie) {
4385					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4386						proc->pid, thread->pid,
4387						(u64)death->cookie,
4388						(u64)cookie);
4389					binder_node_unlock(ref->node);
4390					binder_proc_unlock(proc);
4391					break;
4392				}
4393				ref->death = NULL;
4394				binder_inner_proc_lock(proc);
4395				if (list_empty(&death->work.entry)) {
4396					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4397					if (thread->looper &
4398					    (BINDER_LOOPER_STATE_REGISTERED |
4399					     BINDER_LOOPER_STATE_ENTERED))
4400						binder_enqueue_thread_work_ilocked(
4401								thread,
4402								&death->work);
4403					else {
4404						binder_enqueue_work_ilocked(
4405								&death->work,
4406								&proc->todo);
4407						binder_wakeup_proc_ilocked(
4408								proc);
4409					}
4410				} else {
4411					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4412					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4413				}
4414				binder_inner_proc_unlock(proc);
4415			}
4416			binder_node_unlock(ref->node);
4417			binder_proc_unlock(proc);
4418		} break;
4419		case BC_DEAD_BINDER_DONE: {
4420			struct binder_work *w;
4421			binder_uintptr_t cookie;
4422			struct binder_ref_death *death = NULL;
4423
4424			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4425				return -EFAULT;
4426
4427			ptr += sizeof(cookie);
4428			binder_inner_proc_lock(proc);
4429			list_for_each_entry(w, &proc->delivered_death,
4430					    entry) {
4431				struct binder_ref_death *tmp_death =
4432					container_of(w,
4433						     struct binder_ref_death,
4434						     work);
4435
4436				if (tmp_death->cookie == cookie) {
4437					death = tmp_death;
4438					break;
4439				}
4440			}
4441			binder_debug(BINDER_DEBUG_DEAD_BINDER,
4442				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4443				     proc->pid, thread->pid, (u64)cookie,
4444				     death);
4445			if (death == NULL) {
4446				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4447					proc->pid, thread->pid, (u64)cookie);
4448				binder_inner_proc_unlock(proc);
4449				break;
4450			}
4451			binder_dequeue_work_ilocked(&death->work);
4452			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4453				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4454				if (thread->looper &
4455					(BINDER_LOOPER_STATE_REGISTERED |
4456					 BINDER_LOOPER_STATE_ENTERED))
4457					binder_enqueue_thread_work_ilocked(
4458						thread, &death->work);
4459				else {
4460					binder_enqueue_work_ilocked(
4461							&death->work,
4462							&proc->todo);
4463					binder_wakeup_proc_ilocked(proc);
4464				}
4465			}
4466			binder_inner_proc_unlock(proc);
4467		} break;
4468
4469		default:
4470			pr_err("%d:%d unknown command %d\n",
4471			       proc->pid, thread->pid, cmd);
4472			return -EINVAL;
4473		}
4474		*consumed = ptr - buffer;
4475	}
4476	return 0;
4477}
4478
4479static void binder_stat_br(struct binder_proc *proc,
4480			   struct binder_thread *thread, uint32_t cmd)
4481{
4482	trace_binder_return(cmd);
4483	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4484		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4485		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4486		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4487	}
4488}
4489
4490static int binder_put_node_cmd(struct binder_proc *proc,
4491			       struct binder_thread *thread,
4492			       void __user **ptrp,
4493			       binder_uintptr_t node_ptr,
4494			       binder_uintptr_t node_cookie,
4495			       int node_debug_id,
4496			       uint32_t cmd, const char *cmd_name)
4497{
4498	void __user *ptr = *ptrp;
4499
4500	if (put_user(cmd, (uint32_t __user *)ptr))
4501		return -EFAULT;
4502	ptr += sizeof(uint32_t);
4503
4504	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4505		return -EFAULT;
4506	ptr += sizeof(binder_uintptr_t);
4507
4508	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4509		return -EFAULT;
4510	ptr += sizeof(binder_uintptr_t);
4511
4512	binder_stat_br(proc, thread, cmd);
4513	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4514		     proc->pid, thread->pid, cmd_name, node_debug_id,
4515		     (u64)node_ptr, (u64)node_cookie);
4516
4517	*ptrp = ptr;
4518	return 0;
4519}
4520
4521static int binder_wait_for_work(struct binder_thread *thread,
4522				bool do_proc_work)
4523{
4524	DEFINE_WAIT(wait);
4525	struct binder_proc *proc = thread->proc;
4526	int ret = 0;
4527
4528	freezer_do_not_count();
4529	binder_inner_proc_lock(proc);
4530	for (;;) {
4531		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4532		if (binder_has_work_ilocked(thread, do_proc_work))
4533			break;
4534		if (do_proc_work)
4535			list_add(&thread->waiting_thread_node,
4536				 &proc->waiting_threads);
4537		binder_inner_proc_unlock(proc);
4538		schedule();
4539		binder_inner_proc_lock(proc);
4540		list_del_init(&thread->waiting_thread_node);
4541		if (signal_pending(current)) {
4542			ret = -ERESTARTSYS;
4543			break;
4544		}
4545	}
4546	finish_wait(&thread->wait, &wait);
4547	binder_inner_proc_unlock(proc);
4548	freezer_count();
4549
4550	return ret;
4551}
4552
4553/**
4554 * binder_apply_fd_fixups() - finish fd translation
4555 * @proc:         binder_proc associated @t->buffer
4556 * @t:	binder transaction with list of fd fixups
4557 *
4558 * Now that we are in the context of the transaction target
4559 * process, we can allocate and install fds. Process the
4560 * list of fds to translate and fixup the buffer with the
4561 * new fds.
4562 *
4563 * If we fail to allocate an fd, then free the resources by
4564 * fput'ing files that have not been processed and ksys_close'ing
4565 * any fds that have already been allocated.
4566 */
4567static int binder_apply_fd_fixups(struct binder_proc *proc,
4568				  struct binder_transaction *t)
4569{
4570	struct binder_txn_fd_fixup *fixup, *tmp;
4571	int ret = 0;
4572
4573	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4574		int fd = get_unused_fd_flags(O_CLOEXEC);
4575
4576		if (fd < 0) {
4577			binder_debug(BINDER_DEBUG_TRANSACTION,
4578				     "failed fd fixup txn %d fd %d\n",
4579				     t->debug_id, fd);
4580			ret = -ENOMEM;
4581			break;
4582		}
4583		binder_debug(BINDER_DEBUG_TRANSACTION,
4584			     "fd fixup txn %d fd %d\n",
4585			     t->debug_id, fd);
4586		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4587		fd_install(fd, fixup->file);
4588		fixup->file = NULL;
4589		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4590						fixup->offset, &fd,
4591						sizeof(u32))) {
4592			ret = -EINVAL;
4593			break;
4594		}
4595	}
4596	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4597		if (fixup->file) {
4598			fput(fixup->file);
4599		} else if (ret) {
4600			u32 fd;
4601			int err;
4602
4603			err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
4604							    t->buffer,
4605							    fixup->offset,
4606							    sizeof(fd));
4607			WARN_ON(err);
4608			if (!err)
4609				binder_deferred_fd_close(fd);
4610		}
4611		list_del(&fixup->fixup_entry);
4612		kfree(fixup);
4613	}
4614
4615	return ret;
4616}
4617
4618static int binder_thread_read(struct binder_proc *proc,
4619			      struct binder_thread *thread,
4620			      binder_uintptr_t binder_buffer, size_t size,
4621			      binder_size_t *consumed, int non_block)
4622{
4623	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4624	void __user *ptr = buffer + *consumed;
4625	void __user *end = buffer + size;
4626
4627	int ret = 0;
4628	int wait_for_proc_work;
4629
4630	if (*consumed == 0) {
4631		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4632			return -EFAULT;
4633		ptr += sizeof(uint32_t);
4634	}
4635
4636retry:
4637	binder_inner_proc_lock(proc);
4638	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4639	binder_inner_proc_unlock(proc);
4640
4641	thread->looper |= BINDER_LOOPER_STATE_WAITING;
4642
4643	trace_binder_wait_for_work(wait_for_proc_work,
4644				   !!thread->transaction_stack,
4645				   !binder_worklist_empty(proc, &thread->todo));
4646	if (wait_for_proc_work) {
4647		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4648					BINDER_LOOPER_STATE_ENTERED))) {
4649			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4650				proc->pid, thread->pid, thread->looper);
4651			wait_event_interruptible(binder_user_error_wait,
4652						 binder_stop_on_user_error < 2);
4653		}
4654		binder_set_nice(proc->default_priority);
4655	}
4656
4657	if (non_block) {
4658		if (!binder_has_work(thread, wait_for_proc_work))
4659			ret = -EAGAIN;
4660	} else {
4661		ret = binder_wait_for_work(thread, wait_for_proc_work);
4662	}
4663
4664	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4665
4666	if (ret)
4667		return ret;
4668
4669	while (1) {
4670		uint32_t cmd;
4671		struct binder_transaction_data_secctx tr;
4672		struct binder_transaction_data *trd = &tr.transaction_data;
4673		struct binder_work *w = NULL;
4674		struct list_head *list = NULL;
4675		struct binder_transaction *t = NULL;
4676		struct binder_thread *t_from;
4677		size_t trsize = sizeof(*trd);
4678
4679		binder_inner_proc_lock(proc);
4680		if (!binder_worklist_empty_ilocked(&thread->todo))
4681			list = &thread->todo;
4682		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4683			   wait_for_proc_work)
4684			list = &proc->todo;
4685		else {
4686			binder_inner_proc_unlock(proc);
4687
4688			/* no data added */
4689			if (ptr - buffer == 4 && !thread->looper_need_return)
4690				goto retry;
4691			break;
4692		}
4693
4694		if (end - ptr < sizeof(tr) + 4) {
4695			binder_inner_proc_unlock(proc);
4696			break;
4697		}
4698		w = binder_dequeue_work_head_ilocked(list);
4699		if (binder_worklist_empty_ilocked(&thread->todo))
4700			thread->process_todo = false;
4701
4702		switch (w->type) {
4703		case BINDER_WORK_TRANSACTION: {
4704			binder_inner_proc_unlock(proc);
4705			t = container_of(w, struct binder_transaction, work);
4706		} break;
4707		case BINDER_WORK_RETURN_ERROR: {
4708			struct binder_error *e = container_of(
4709					w, struct binder_error, work);
4710
4711			WARN_ON(e->cmd == BR_OK);
4712			binder_inner_proc_unlock(proc);
4713			if (put_user(e->cmd, (uint32_t __user *)ptr))
4714				return -EFAULT;
4715			cmd = e->cmd;
4716			e->cmd = BR_OK;
4717			ptr += sizeof(uint32_t);
4718
4719			binder_stat_br(proc, thread, cmd);
4720		} break;
4721		case BINDER_WORK_TRANSACTION_COMPLETE: {
4722			binder_inner_proc_unlock(proc);
4723			cmd = BR_TRANSACTION_COMPLETE;
4724			kfree(w);
4725			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4726			if (put_user(cmd, (uint32_t __user *)ptr))
4727				return -EFAULT;
4728			ptr += sizeof(uint32_t);
4729
4730			binder_stat_br(proc, thread, cmd);
4731			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4732				     "%d:%d BR_TRANSACTION_COMPLETE\n",
4733				     proc->pid, thread->pid);
4734		} break;
4735		case BINDER_WORK_NODE: {
4736			struct binder_node *node = container_of(w, struct binder_node, work);
4737			int strong, weak;
4738			binder_uintptr_t node_ptr = node->ptr;
4739			binder_uintptr_t node_cookie = node->cookie;
4740			int node_debug_id = node->debug_id;
4741			int has_weak_ref;
4742			int has_strong_ref;
4743			void __user *orig_ptr = ptr;
4744
4745			BUG_ON(proc != node->proc);
4746			strong = node->internal_strong_refs ||
4747					node->local_strong_refs;
4748			weak = !hlist_empty(&node->refs) ||
4749					node->local_weak_refs ||
4750					node->tmp_refs || strong;
4751			has_strong_ref = node->has_strong_ref;
4752			has_weak_ref = node->has_weak_ref;
4753
4754			if (weak && !has_weak_ref) {
4755				node->has_weak_ref = 1;
4756				node->pending_weak_ref = 1;
4757				node->local_weak_refs++;
4758			}
4759			if (strong && !has_strong_ref) {
4760				node->has_strong_ref = 1;
4761				node->pending_strong_ref = 1;
4762				node->local_strong_refs++;
4763			}
4764			if (!strong && has_strong_ref)
4765				node->has_strong_ref = 0;
4766			if (!weak && has_weak_ref)
4767				node->has_weak_ref = 0;
4768			if (!weak && !strong) {
4769				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4770					     "%d:%d node %d u%016llx c%016llx deleted\n",
4771					     proc->pid, thread->pid,
4772					     node_debug_id,
4773					     (u64)node_ptr,
4774					     (u64)node_cookie);
4775				rb_erase(&node->rb_node, &proc->nodes);
4776				binder_inner_proc_unlock(proc);
4777				binder_node_lock(node);
4778				/*
4779				 * Acquire the node lock before freeing the
4780				 * node to serialize with other threads that
4781				 * may have been holding the node lock while
4782				 * decrementing this node (avoids race where
4783				 * this thread frees while the other thread
4784				 * is unlocking the node after the final
4785				 * decrement)
4786				 */
4787				binder_node_unlock(node);
4788				binder_free_node(node);
4789			} else
4790				binder_inner_proc_unlock(proc);
4791
4792			if (weak && !has_weak_ref)
4793				ret = binder_put_node_cmd(
4794						proc, thread, &ptr, node_ptr,
4795						node_cookie, node_debug_id,
4796						BR_INCREFS, "BR_INCREFS");
4797			if (!ret && strong && !has_strong_ref)
4798				ret = binder_put_node_cmd(
4799						proc, thread, &ptr, node_ptr,
4800						node_cookie, node_debug_id,
4801						BR_ACQUIRE, "BR_ACQUIRE");
4802			if (!ret && !strong && has_strong_ref)
4803				ret = binder_put_node_cmd(
4804						proc, thread, &ptr, node_ptr,
4805						node_cookie, node_debug_id,
4806						BR_RELEASE, "BR_RELEASE");
4807			if (!ret && !weak && has_weak_ref)
4808				ret = binder_put_node_cmd(
4809						proc, thread, &ptr, node_ptr,
4810						node_cookie, node_debug_id,
4811						BR_DECREFS, "BR_DECREFS");
4812			if (orig_ptr == ptr)
4813				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4814					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
4815					     proc->pid, thread->pid,
4816					     node_debug_id,
4817					     (u64)node_ptr,
4818					     (u64)node_cookie);
4819			if (ret)
4820				return ret;
4821		} break;
4822		case BINDER_WORK_DEAD_BINDER:
4823		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4824		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4825			struct binder_ref_death *death;
4826			uint32_t cmd;
4827			binder_uintptr_t cookie;
4828
4829			death = container_of(w, struct binder_ref_death, work);
4830			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4831				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4832			else
4833				cmd = BR_DEAD_BINDER;
4834			cookie = death->cookie;
4835
4836			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4837				     "%d:%d %s %016llx\n",
4838				      proc->pid, thread->pid,
4839				      cmd == BR_DEAD_BINDER ?
4840				      "BR_DEAD_BINDER" :
4841				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4842				      (u64)cookie);
4843			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4844				binder_inner_proc_unlock(proc);
4845				kfree(death);
4846				binder_stats_deleted(BINDER_STAT_DEATH);
4847			} else {
4848				binder_enqueue_work_ilocked(
4849						w, &proc->delivered_death);
4850				binder_inner_proc_unlock(proc);
4851			}
4852			if (put_user(cmd, (uint32_t __user *)ptr))
4853				return -EFAULT;
4854			ptr += sizeof(uint32_t);
4855			if (put_user(cookie,
4856				     (binder_uintptr_t __user *)ptr))
4857				return -EFAULT;
4858			ptr += sizeof(binder_uintptr_t);
4859			binder_stat_br(proc, thread, cmd);
4860			if (cmd == BR_DEAD_BINDER)
4861				goto done; /* DEAD_BINDER notifications can cause transactions */
4862		} break;
4863		default:
4864			binder_inner_proc_unlock(proc);
4865			pr_err("%d:%d: bad work type %d\n",
4866			       proc->pid, thread->pid, w->type);
4867			break;
4868		}
4869
4870		if (!t)
4871			continue;
4872
4873		BUG_ON(t->buffer == NULL);
4874		if (t->buffer->target_node) {
4875			struct binder_node *target_node = t->buffer->target_node;
4876
4877			trd->target.ptr = target_node->ptr;
4878			trd->cookie =  target_node->cookie;
4879			t->saved_priority = task_nice(current);
4880			if (t->priority < target_node->min_priority &&
4881			    !(t->flags & TF_ONE_WAY))
4882				binder_set_nice(t->priority);
4883			else if (!(t->flags & TF_ONE_WAY) ||
4884				 t->saved_priority > target_node->min_priority)
4885				binder_set_nice(target_node->min_priority);
4886			cmd = BR_TRANSACTION;
4887		} else {
4888			trd->target.ptr = 0;
4889			trd->cookie = 0;
4890			cmd = BR_REPLY;
4891		}
4892		trd->code = t->code;
4893		trd->flags = t->flags;
4894		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4895
4896		t_from = binder_get_txn_from(t);
4897		if (t_from) {
4898			struct task_struct *sender = t_from->proc->tsk;
4899
4900			trd->sender_pid =
4901				task_tgid_nr_ns(sender,
4902						task_active_pid_ns(current));
4903#ifdef CONFIG_BINDER_SENDER_INFO
4904			binder_inner_proc_lock(thread->proc);
4905			thread->sender_pid_nr = task_tgid_nr(sender);
4906			binder_inner_proc_unlock(thread->proc);
4907#endif
4908		} else {
4909			trd->sender_pid = 0;
4910#ifdef CONFIG_BINDER_SENDER_INFO
4911			binder_inner_proc_lock(thread->proc);
4912			thread->sender_pid_nr = 0;
4913			binder_inner_proc_unlock(thread->proc);
4914#endif
4915		}
4916
4917		ret = binder_apply_fd_fixups(proc, t);
4918		if (ret) {
4919			struct binder_buffer *buffer = t->buffer;
4920			bool oneway = !!(t->flags & TF_ONE_WAY);
4921			int tid = t->debug_id;
4922
4923			if (t_from)
4924				binder_thread_dec_tmpref(t_from);
4925			buffer->transaction = NULL;
4926			binder_cleanup_transaction(t, "fd fixups failed",
4927						   BR_FAILED_REPLY);
4928			binder_free_buf(proc, thread, buffer, true);
4929			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4930				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4931				     proc->pid, thread->pid,
4932				     oneway ? "async " :
4933					(cmd == BR_REPLY ? "reply " : ""),
4934				     tid, BR_FAILED_REPLY, ret, __LINE__);
4935			if (cmd == BR_REPLY) {
4936				cmd = BR_FAILED_REPLY;
4937				if (put_user(cmd, (uint32_t __user *)ptr))
4938					return -EFAULT;
4939				ptr += sizeof(uint32_t);
4940				binder_stat_br(proc, thread, cmd);
4941				break;
4942			}
4943			continue;
4944		}
4945		trd->data_size = t->buffer->data_size;
4946		trd->offsets_size = t->buffer->offsets_size;
4947		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4948		trd->data.ptr.offsets = trd->data.ptr.buffer +
4949					ALIGN(t->buffer->data_size,
4950					    sizeof(void *));
4951
4952		tr.secctx = t->security_ctx;
4953		if (t->security_ctx) {
4954			cmd = BR_TRANSACTION_SEC_CTX;
4955			trsize = sizeof(tr);
4956		}
4957		if (put_user(cmd, (uint32_t __user *)ptr)) {
4958			if (t_from)
4959				binder_thread_dec_tmpref(t_from);
4960
4961			binder_cleanup_transaction(t, "put_user failed",
4962						   BR_FAILED_REPLY);
4963
4964			return -EFAULT;
4965		}
4966		ptr += sizeof(uint32_t);
4967		if (copy_to_user(ptr, &tr, trsize)) {
4968			if (t_from)
4969				binder_thread_dec_tmpref(t_from);
4970
4971			binder_cleanup_transaction(t, "copy_to_user failed",
4972						   BR_FAILED_REPLY);
4973
4974			return -EFAULT;
4975		}
4976		ptr += trsize;
4977
4978		trace_binder_transaction_received(t);
4979		binder_stat_br(proc, thread, cmd);
4980		binder_debug(BINDER_DEBUG_TRANSACTION,
4981			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4982			     proc->pid, thread->pid,
4983			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4984				(cmd == BR_TRANSACTION_SEC_CTX) ?
4985				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4986			     t->debug_id, t_from ? t_from->proc->pid : 0,
4987			     t_from ? t_from->pid : 0, cmd,
4988			     t->buffer->data_size, t->buffer->offsets_size,
4989			     (u64)trd->data.ptr.buffer,
4990			     (u64)trd->data.ptr.offsets);
4991
4992		if (t_from)
4993			binder_thread_dec_tmpref(t_from);
4994		t->buffer->allow_user_free = 1;
4995#ifdef CONFIG_ACCESS_TOKENID
4996		binder_inner_proc_lock(thread->proc);
4997		thread->tokens.sender_tokenid = t->sender_tokenid;
4998		thread->tokens.first_tokenid = t->first_tokenid;
4999		binder_inner_proc_unlock(thread->proc);
5000#endif /* CONFIG_ACCESS_TOKENID */
5001		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
5002			binder_inner_proc_lock(thread->proc);
5003			t->to_parent = thread->transaction_stack;
5004			t->to_thread = thread;
5005			thread->transaction_stack = t;
5006			binder_inner_proc_unlock(thread->proc);
5007		} else {
5008			binder_free_transaction(t);
5009		}
5010		break;
5011	}
5012
5013done:
5014
5015	*consumed = ptr - buffer;
5016	binder_inner_proc_lock(proc);
5017	if (proc->requested_threads == 0 &&
5018	    list_empty(&thread->proc->waiting_threads) &&
5019	    proc->requested_threads_started < proc->max_threads &&
5020	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
5021	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
5022	     /*spawn a new thread if we leave this out */) {
5023		proc->requested_threads++;
5024		binder_inner_proc_unlock(proc);
5025		binder_debug(BINDER_DEBUG_THREADS,
5026			     "%d:%d BR_SPAWN_LOOPER\n",
5027			     proc->pid, thread->pid);
5028		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
5029			return -EFAULT;
5030		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
5031	} else
5032		binder_inner_proc_unlock(proc);
5033	return 0;
5034}
5035
5036static void binder_release_work(struct binder_proc *proc,
5037				struct list_head *list)
5038{
5039	struct binder_work *w;
5040	enum binder_work_type wtype;
5041
5042	while (1) {
5043		binder_inner_proc_lock(proc);
5044		w = binder_dequeue_work_head_ilocked(list);
5045		wtype = w ? w->type : 0;
5046		binder_inner_proc_unlock(proc);
5047		if (!w)
5048			return;
5049
5050		switch (wtype) {
5051		case BINDER_WORK_TRANSACTION: {
5052			struct binder_transaction *t;
5053
5054			t = container_of(w, struct binder_transaction, work);
5055
5056			binder_cleanup_transaction(t, "process died.",
5057						   BR_DEAD_REPLY);
5058		} break;
5059		case BINDER_WORK_RETURN_ERROR: {
5060			struct binder_error *e = container_of(
5061					w, struct binder_error, work);
5062
5063			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5064				"undelivered TRANSACTION_ERROR: %u\n",
5065				e->cmd);
5066		} break;
5067		case BINDER_WORK_TRANSACTION_COMPLETE: {
5068			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5069				"undelivered TRANSACTION_COMPLETE\n");
5070			kfree(w);
5071			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
5072		} break;
5073		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5074		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
5075			struct binder_ref_death *death;
5076
5077			death = container_of(w, struct binder_ref_death, work);
5078			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5079				"undelivered death notification, %016llx\n",
5080				(u64)death->cookie);
5081			kfree(death);
5082			binder_stats_deleted(BINDER_STAT_DEATH);
5083		} break;
5084		case BINDER_WORK_NODE:
5085			break;
5086		default:
5087			pr_err("unexpected work type, %d, not freed\n",
5088			       wtype);
5089			break;
5090		}
5091	}
5092
5093}
5094
5095static struct binder_thread *binder_get_thread_ilocked(
5096		struct binder_proc *proc, struct binder_thread *new_thread)
5097{
5098	struct binder_thread *thread = NULL;
5099	struct rb_node *parent = NULL;
5100	struct rb_node **p = &proc->threads.rb_node;
5101
5102	while (*p) {
5103		parent = *p;
5104		thread = rb_entry(parent, struct binder_thread, rb_node);
5105
5106		if (current->pid < thread->pid)
5107			p = &(*p)->rb_left;
5108		else if (current->pid > thread->pid)
5109			p = &(*p)->rb_right;
5110		else
5111			return thread;
5112	}
5113	if (!new_thread)
5114		return NULL;
5115	thread = new_thread;
5116	binder_stats_created(BINDER_STAT_THREAD);
5117	thread->proc = proc;
5118	thread->pid = current->pid;
5119	atomic_set(&thread->tmp_ref, 0);
5120	init_waitqueue_head(&thread->wait);
5121	INIT_LIST_HEAD(&thread->todo);
5122	rb_link_node(&thread->rb_node, parent, p);
5123	rb_insert_color(&thread->rb_node, &proc->threads);
5124	thread->looper_need_return = true;
5125	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
5126	thread->return_error.cmd = BR_OK;
5127	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
5128	thread->reply_error.cmd = BR_OK;
5129	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
5130	return thread;
5131}
5132
5133static struct binder_thread *binder_get_thread(struct binder_proc *proc)
5134{
5135	struct binder_thread *thread;
5136	struct binder_thread *new_thread;
5137
5138	binder_inner_proc_lock(proc);
5139	thread = binder_get_thread_ilocked(proc, NULL);
5140	binder_inner_proc_unlock(proc);
5141	if (!thread) {
5142		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
5143		if (new_thread == NULL)
5144			return NULL;
5145		binder_inner_proc_lock(proc);
5146		thread = binder_get_thread_ilocked(proc, new_thread);
5147		binder_inner_proc_unlock(proc);
5148		if (thread != new_thread)
5149			kfree(new_thread);
5150	}
5151	return thread;
5152}
5153
5154static void binder_free_proc(struct binder_proc *proc)
5155{
5156	struct binder_device *device;
5157
5158	BUG_ON(!list_empty(&proc->todo));
5159	BUG_ON(!list_empty(&proc->delivered_death));
5160	device = container_of(proc->context, struct binder_device, context);
5161	if (refcount_dec_and_test(&device->ref)) {
5162		kfree(proc->context->name);
5163		kfree(device);
5164	}
5165	binder_alloc_deferred_release(&proc->alloc);
5166	put_task_struct(proc->tsk);
5167	put_cred(proc->cred);
5168	binder_stats_deleted(BINDER_STAT_PROC);
5169	kfree(proc);
5170}
5171
5172static void binder_free_thread(struct binder_thread *thread)
5173{
5174	BUG_ON(!list_empty(&thread->todo));
5175	binder_stats_deleted(BINDER_STAT_THREAD);
5176	binder_proc_dec_tmpref(thread->proc);
5177	kfree(thread);
5178}
5179
5180static int binder_thread_release(struct binder_proc *proc,
5181				 struct binder_thread *thread)
5182{
5183	struct binder_transaction *t;
5184	struct binder_transaction *send_reply = NULL;
5185	int active_transactions = 0;
5186	struct binder_transaction *last_t = NULL;
5187
5188	binder_inner_proc_lock(thread->proc);
5189	/*
5190	 * take a ref on the proc so it survives
5191	 * after we remove this thread from proc->threads.
5192	 * The corresponding dec is when we actually
5193	 * free the thread in binder_free_thread()
5194	 */
5195	proc->tmp_ref++;
5196	/*
5197	 * take a ref on this thread to ensure it
5198	 * survives while we are releasing it
5199	 */
5200	atomic_inc(&thread->tmp_ref);
5201	rb_erase(&thread->rb_node, &proc->threads);
5202	t = thread->transaction_stack;
5203	if (t) {
5204		spin_lock(&t->lock);
5205		if (t->to_thread == thread)
5206			send_reply = t;
5207	} else {
5208		__acquire(&t->lock);
5209	}
5210	thread->is_dead = true;
5211
5212	while (t) {
5213		last_t = t;
5214		active_transactions++;
5215		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
5216			     "release %d:%d transaction %d %s, still active\n",
5217			      proc->pid, thread->pid,
5218			     t->debug_id,
5219			     (t->to_thread == thread) ? "in" : "out");
5220
5221		if (t->to_thread == thread) {
5222			t->to_proc = NULL;
5223			t->to_thread = NULL;
5224			if (t->buffer) {
5225				t->buffer->transaction = NULL;
5226				t->buffer = NULL;
5227			}
5228			t = t->to_parent;
5229		} else if (t->from == thread) {
5230			t->from = NULL;
5231#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
5232			t->async_from_pid = -1;
5233			t->async_from_tid = -1;
5234#endif
5235			t = t->from_parent;
5236		} else
5237			BUG();
5238		spin_unlock(&last_t->lock);
5239		if (t)
5240			spin_lock(&t->lock);
5241		else
5242			__acquire(&t->lock);
5243	}
5244	/* annotation for sparse, lock not acquired in last iteration above */
5245	__release(&t->lock);
5246
5247	/*
5248	 * If this thread used poll, make sure we remove the waitqueue from any
5249	 * poll data structures holding it.
5250	 */
5251	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5252		wake_up_pollfree(&thread->wait);
5253
5254	binder_inner_proc_unlock(thread->proc);
5255
5256	/*
5257	 * This is needed to avoid races between wake_up_pollfree() above and
5258	 * someone else removing the last entry from the queue for other reasons
5259	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
5260	 * descriptor being closed).  Such other users hold an RCU read lock, so
5261	 * we can be sure they're done after we call synchronize_rcu().
5262	 */
5263	if (thread->looper & BINDER_LOOPER_STATE_POLL)
5264		synchronize_rcu();
5265
5266	if (send_reply)
5267		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
5268	binder_release_work(proc, &thread->todo);
5269	binder_thread_dec_tmpref(thread);
5270	return active_transactions;
5271}
5272
5273static __poll_t binder_poll(struct file *filp,
5274				struct poll_table_struct *wait)
5275{
5276	struct binder_proc *proc = filp->private_data;
5277	struct binder_thread *thread = NULL;
5278	bool wait_for_proc_work;
5279
5280	thread = binder_get_thread(proc);
5281	if (!thread)
5282		return EPOLLERR;
5283
5284	binder_inner_proc_lock(thread->proc);
5285	thread->looper |= BINDER_LOOPER_STATE_POLL;
5286	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
5287
5288	binder_inner_proc_unlock(thread->proc);
5289
5290	poll_wait(filp, &thread->wait, wait);
5291
5292	if (binder_has_work(thread, wait_for_proc_work))
5293		return EPOLLIN;
5294
5295	return 0;
5296}
5297
5298static int binder_ioctl_write_read(struct file *filp,
5299				unsigned int cmd, unsigned long arg,
5300				struct binder_thread *thread)
5301{
5302	int ret = 0;
5303	struct binder_proc *proc = filp->private_data;
5304	unsigned int size = _IOC_SIZE(cmd);
5305	void __user *ubuf = (void __user *)arg;
5306	struct binder_write_read bwr;
5307
5308	if (size != sizeof(struct binder_write_read)) {
5309		ret = -EINVAL;
5310		goto out;
5311	}
5312	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
5313		ret = -EFAULT;
5314		goto out;
5315	}
5316	binder_debug(BINDER_DEBUG_READ_WRITE,
5317		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
5318		     proc->pid, thread->pid,
5319		     (u64)bwr.write_size, (u64)bwr.write_buffer,
5320		     (u64)bwr.read_size, (u64)bwr.read_buffer);
5321
5322	if (bwr.write_size > 0) {
5323		ret = binder_thread_write(proc, thread,
5324					  bwr.write_buffer,
5325					  bwr.write_size,
5326					  &bwr.write_consumed);
5327		trace_binder_write_done(ret);
5328		if (ret < 0) {
5329			bwr.read_consumed = 0;
5330			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5331				ret = -EFAULT;
5332			goto out;
5333		}
5334	}
5335	if (bwr.read_size > 0) {
5336		ret = binder_thread_read(proc, thread, bwr.read_buffer,
5337					 bwr.read_size,
5338					 &bwr.read_consumed,
5339					 filp->f_flags & O_NONBLOCK);
5340		trace_binder_read_done(ret);
5341		binder_inner_proc_lock(proc);
5342		if (!binder_worklist_empty_ilocked(&proc->todo))
5343			binder_wakeup_proc_ilocked(proc);
5344		binder_inner_proc_unlock(proc);
5345		if (ret < 0) {
5346			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
5347				ret = -EFAULT;
5348			goto out;
5349		}
5350	}
5351	binder_debug(BINDER_DEBUG_READ_WRITE,
5352		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
5353		     proc->pid, thread->pid,
5354		     (u64)bwr.write_consumed, (u64)bwr.write_size,
5355		     (u64)bwr.read_consumed, (u64)bwr.read_size);
5356	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
5357		ret = -EFAULT;
5358		goto out;
5359	}
5360out:
5361	return ret;
5362}
5363
5364static int binder_ioctl_set_ctx_mgr(struct file *filp,
5365				    struct flat_binder_object *fbo)
5366{
5367	int ret = 0;
5368	struct binder_proc *proc = filp->private_data;
5369	struct binder_context *context = proc->context;
5370	struct binder_node *new_node;
5371	kuid_t curr_euid = current_euid();
5372
5373	mutex_lock(&context->context_mgr_node_lock);
5374	if (context->binder_context_mgr_node) {
5375		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
5376		ret = -EBUSY;
5377		goto out;
5378	}
5379	ret = security_binder_set_context_mgr(proc->cred);
5380	if (ret < 0)
5381		goto out;
5382	if (uid_valid(context->binder_context_mgr_uid)) {
5383		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
5384			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
5385			       from_kuid(&init_user_ns, curr_euid),
5386			       from_kuid(&init_user_ns,
5387					 context->binder_context_mgr_uid));
5388			ret = -EPERM;
5389			goto out;
5390		}
5391	} else {
5392		context->binder_context_mgr_uid = curr_euid;
5393	}
5394	new_node = binder_new_node(proc, fbo);
5395	if (!new_node) {
5396		ret = -ENOMEM;
5397		goto out;
5398	}
5399	binder_node_lock(new_node);
5400	new_node->local_weak_refs++;
5401	new_node->local_strong_refs++;
5402	new_node->has_strong_ref = 1;
5403	new_node->has_weak_ref = 1;
5404	context->binder_context_mgr_node = new_node;
5405	binder_node_unlock(new_node);
5406	binder_put_node(new_node);
5407out:
5408	mutex_unlock(&context->context_mgr_node_lock);
5409	return ret;
5410}
5411
5412static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5413		struct binder_node_info_for_ref *info)
5414{
5415	struct binder_node *node;
5416	struct binder_context *context = proc->context;
5417	__u32 handle = info->handle;
5418
5419	if (info->strong_count || info->weak_count || info->reserved1 ||
5420	    info->reserved2 || info->reserved3) {
5421		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5422				  proc->pid);
5423		return -EINVAL;
5424	}
5425
5426	/* This ioctl may only be used by the context manager */
5427	mutex_lock(&context->context_mgr_node_lock);
5428	if (!context->binder_context_mgr_node ||
5429		context->binder_context_mgr_node->proc != proc) {
5430		mutex_unlock(&context->context_mgr_node_lock);
5431		return -EPERM;
5432	}
5433	mutex_unlock(&context->context_mgr_node_lock);
5434
5435	node = binder_get_node_from_ref(proc, handle, true, NULL);
5436	if (!node)
5437		return -EINVAL;
5438
5439	info->strong_count = node->local_strong_refs +
5440		node->internal_strong_refs;
5441	info->weak_count = node->local_weak_refs;
5442
5443	binder_put_node(node);
5444
5445	return 0;
5446}
5447
5448static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5449				struct binder_node_debug_info *info)
5450{
5451	struct rb_node *n;
5452	binder_uintptr_t ptr = info->ptr;
5453
5454	memset(info, 0, sizeof(*info));
5455
5456	binder_inner_proc_lock(proc);
5457	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5458		struct binder_node *node = rb_entry(n, struct binder_node,
5459						    rb_node);
5460		if (node->ptr > ptr) {
5461			info->ptr = node->ptr;
5462			info->cookie = node->cookie;
5463			info->has_strong_ref = node->has_strong_ref;
5464			info->has_weak_ref = node->has_weak_ref;
5465			break;
5466		}
5467	}
5468	binder_inner_proc_unlock(proc);
5469
5470	return 0;
5471}
5472
5473static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5474{
5475	int ret;
5476	struct binder_proc *proc = filp->private_data;
5477	struct binder_thread *thread;
5478	unsigned int size = _IOC_SIZE(cmd);
5479	void __user *ubuf = (void __user *)arg;
5480
5481	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
5482			proc->pid, current->pid, cmd, arg);*/
5483
5484	binder_selftest_alloc(&proc->alloc);
5485
5486	trace_binder_ioctl(cmd, arg);
5487
5488	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5489	if (ret)
5490		goto err_unlocked;
5491
5492	thread = binder_get_thread(proc);
5493	if (thread == NULL) {
5494		ret = -ENOMEM;
5495		goto err;
5496	}
5497
5498	switch (cmd) {
5499	case BINDER_WRITE_READ:
5500		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5501		if (ret)
5502			goto err;
5503		break;
5504	case BINDER_SET_MAX_THREADS: {
5505		int max_threads;
5506
5507		if (copy_from_user(&max_threads, ubuf,
5508				   sizeof(max_threads))) {
5509			ret = -EINVAL;
5510			goto err;
5511		}
5512		binder_inner_proc_lock(proc);
5513		proc->max_threads = max_threads;
5514		binder_inner_proc_unlock(proc);
5515		break;
5516	}
5517	case BINDER_SET_CONTEXT_MGR_EXT: {
5518		struct flat_binder_object fbo;
5519
5520		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5521			ret = -EINVAL;
5522			goto err;
5523		}
5524		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5525		if (ret)
5526			goto err;
5527		break;
5528	}
5529	case BINDER_SET_CONTEXT_MGR:
5530		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5531		if (ret)
5532			goto err;
5533		break;
5534	case BINDER_THREAD_EXIT:
5535		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5536			     proc->pid, thread->pid);
5537		binder_thread_release(proc, thread);
5538		thread = NULL;
5539		break;
5540	case BINDER_VERSION: {
5541		struct binder_version __user *ver = ubuf;
5542
5543		if (size != sizeof(struct binder_version)) {
5544			ret = -EINVAL;
5545			goto err;
5546		}
5547		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5548			     &ver->protocol_version)) {
5549			ret = -EINVAL;
5550			goto err;
5551		}
5552		break;
5553	}
5554	case BINDER_GET_NODE_INFO_FOR_REF: {
5555		struct binder_node_info_for_ref info;
5556
5557		if (copy_from_user(&info, ubuf, sizeof(info))) {
5558			ret = -EFAULT;
5559			goto err;
5560		}
5561
5562		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5563		if (ret < 0)
5564			goto err;
5565
5566		if (copy_to_user(ubuf, &info, sizeof(info))) {
5567			ret = -EFAULT;
5568			goto err;
5569		}
5570
5571		break;
5572	}
5573	case BINDER_GET_NODE_DEBUG_INFO: {
5574		struct binder_node_debug_info info;
5575
5576		if (copy_from_user(&info, ubuf, sizeof(info))) {
5577			ret = -EFAULT;
5578			goto err;
5579		}
5580
5581		ret = binder_ioctl_get_node_debug_info(proc, &info);
5582		if (ret < 0)
5583			goto err;
5584
5585		if (copy_to_user(ubuf, &info, sizeof(info))) {
5586			ret = -EFAULT;
5587			goto err;
5588		}
5589		break;
5590	}
5591	case BINDER_FEATURE_SET: {
5592		struct binder_feature_set __user *features = ubuf;
5593
5594		if (size != sizeof(struct binder_feature_set)) {
5595			ret = -EINVAL;
5596			goto err;
5597		}
5598		if (put_user(BINDER_CURRENT_FEATURE_SET, &features->feature_set)) {
5599			ret = -EINVAL;
5600			goto err;
5601		}
5602		break;
5603	}
5604#ifdef CONFIG_ACCESS_TOKENID
5605	case BINDER_GET_ACCESS_TOKEN: {
5606		struct access_token __user *tokens = ubuf;
5607		u64 token, ftoken;
5608
5609		if (size != sizeof(struct access_token)) {
5610			ret = -EINVAL;
5611			goto err;
5612		}
5613		binder_inner_proc_lock(proc);
5614		token = thread->tokens.sender_tokenid;
5615		ftoken = thread->tokens.first_tokenid;
5616		binder_inner_proc_unlock(proc);
5617		if (put_user(token, &tokens->sender_tokenid)) {
5618			ret = -EINVAL;
5619			goto err;
5620		}
5621		if (put_user(ftoken, &tokens->first_tokenid)) {
5622			ret = -EINVAL;
5623			goto err;
5624		}
5625		break;
5626	}
5627#endif /* CONFIG_ACCESS_TOKENID */
5628
5629#ifdef CONFIG_BINDER_SENDER_INFO
5630	case BINDER_GET_SENDER_INFO: {
5631		struct binder_sender_info __user *sender = ubuf;
5632		u64 token, ftoken, sender_pid_nr;
5633		if (size != sizeof(struct binder_sender_info)) {
5634			ret = -EINVAL;
5635			goto err;
5636		}
5637		binder_inner_proc_lock(proc);
5638#ifdef CONFIG_ACCESS_TOKENID
5639		token = thread->tokens.sender_tokenid;
5640		ftoken = thread->tokens.first_tokenid;
5641#endif /*CONFIG_ACCESS_TOKENID*/
5642		sender_pid_nr = thread->sender_pid_nr;
5643		binder_inner_proc_unlock(proc);
5644#ifdef CONFIG_ACCESS_TOKENID
5645		if (put_user(token, &sender->tokens.sender_tokenid)) {
5646			ret = -EFAULT;
5647			goto err;
5648		}
5649		if (put_user(ftoken, &sender->tokens.first_tokenid)) {
5650			ret = -EFAULT;
5651			goto err;
5652		}
5653#endif /*CONFIG_ACCESS_TOKENID*/
5654		if (put_user(sender_pid_nr, &sender->sender_pid_nr)) {
5655			ret = -EFAULT;
5656			goto err;
5657		}
5658		break;
5659	}
5660#endif /* CONFIG_BINDER_SENDER_INFO */
5661	default:
5662		ret = -EINVAL;
5663		goto err;
5664	}
5665	ret = 0;
5666err:
5667	if (thread)
5668		thread->looper_need_return = false;
5669	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5670	if (ret && ret != -ERESTARTSYS)
5671		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5672err_unlocked:
5673	trace_binder_ioctl_done(ret);
5674	return ret;
5675}
5676
5677static void binder_vma_open(struct vm_area_struct *vma)
5678{
5679	struct binder_proc *proc = vma->vm_private_data;
5680
5681	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5682		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5683		     proc->pid, vma->vm_start, vma->vm_end,
5684		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5685		     (unsigned long)pgprot_val(vma->vm_page_prot));
5686}
5687
5688static void binder_vma_close(struct vm_area_struct *vma)
5689{
5690	struct binder_proc *proc = vma->vm_private_data;
5691
5692	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5693		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5694		     proc->pid, vma->vm_start, vma->vm_end,
5695		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5696		     (unsigned long)pgprot_val(vma->vm_page_prot));
5697	binder_alloc_vma_close(&proc->alloc);
5698}
5699
5700static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5701{
5702	return VM_FAULT_SIGBUS;
5703}
5704
5705static const struct vm_operations_struct binder_vm_ops = {
5706	.open = binder_vma_open,
5707	.close = binder_vma_close,
5708	.fault = binder_vm_fault,
5709};
5710
5711static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5712{
5713	struct binder_proc *proc = filp->private_data;
5714
5715	if (proc->tsk != current->group_leader)
5716		return -EINVAL;
5717
5718	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5719		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5720		     __func__, proc->pid, vma->vm_start, vma->vm_end,
5721		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5722		     (unsigned long)pgprot_val(vma->vm_page_prot));
5723
5724	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5725		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5726		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5727		return -EPERM;
5728	}
5729	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5730	vma->vm_flags &= ~VM_MAYWRITE;
5731
5732	vma->vm_ops = &binder_vm_ops;
5733	vma->vm_private_data = proc;
5734
5735	return binder_alloc_mmap_handler(&proc->alloc, vma);
5736}
5737
5738static int binder_open(struct inode *nodp, struct file *filp)
5739{
5740	struct binder_proc *proc, *itr;
5741	struct binder_device *binder_dev;
5742	struct binderfs_info *info;
5743	struct dentry *binder_binderfs_dir_entry_proc = NULL;
5744	bool existing_pid = false;
5745
5746	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5747		     current->group_leader->pid, current->pid);
5748
5749	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5750	if (proc == NULL)
5751		return -ENOMEM;
5752	spin_lock_init(&proc->inner_lock);
5753	spin_lock_init(&proc->outer_lock);
5754	get_task_struct(current->group_leader);
5755	proc->tsk = current->group_leader;
5756	proc->cred = get_cred(filp->f_cred);
5757	INIT_LIST_HEAD(&proc->todo);
5758	proc->default_priority = task_nice(current);
5759	/* binderfs stashes devices in i_private */
5760	if (is_binderfs_device(nodp)) {
5761		binder_dev = nodp->i_private;
5762		info = nodp->i_sb->s_fs_info;
5763		binder_binderfs_dir_entry_proc = info->proc_log_dir;
5764	} else {
5765		binder_dev = container_of(filp->private_data,
5766					  struct binder_device, miscdev);
5767	}
5768	refcount_inc(&binder_dev->ref);
5769	proc->context = &binder_dev->context;
5770	binder_alloc_init(&proc->alloc);
5771
5772	binder_stats_created(BINDER_STAT_PROC);
5773	proc->pid = current->group_leader->pid;
5774	INIT_LIST_HEAD(&proc->delivered_death);
5775	INIT_LIST_HEAD(&proc->waiting_threads);
5776	filp->private_data = proc;
5777
5778	mutex_lock(&binder_procs_lock);
5779	hlist_for_each_entry(itr, &binder_procs, proc_node) {
5780		if (itr->pid == proc->pid) {
5781			existing_pid = true;
5782			break;
5783		}
5784	}
5785	hlist_add_head(&proc->proc_node, &binder_procs);
5786	mutex_unlock(&binder_procs_lock);
5787
5788	if (binder_debugfs_dir_entry_proc && !existing_pid) {
5789		char strbuf[11];
5790
5791		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5792		/*
5793		 * proc debug entries are shared between contexts.
5794		 * Only create for the first PID to avoid debugfs log spamming
5795		 * The printing code will anyway print all contexts for a given
5796		 * PID so this is not a problem.
5797		 */
5798		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5799			binder_debugfs_dir_entry_proc,
5800			(void *)(unsigned long)proc->pid,
5801			&proc_fops);
5802	}
5803
5804	if (binder_binderfs_dir_entry_proc && !existing_pid) {
5805		char strbuf[11];
5806		struct dentry *binderfs_entry;
5807
5808		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5809		/*
5810		 * Similar to debugfs, the process specific log file is shared
5811		 * between contexts. Only create for the first PID.
5812		 * This is ok since same as debugfs, the log file will contain
5813		 * information on all contexts of a given PID.
5814		 */
5815		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5816			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5817		if (!IS_ERR(binderfs_entry)) {
5818			proc->binderfs_entry = binderfs_entry;
5819		} else {
5820			int error;
5821
5822			error = PTR_ERR(binderfs_entry);
5823			pr_warn("Unable to create file %s in binderfs (error %d)\n",
5824				strbuf, error);
5825		}
5826	}
5827
5828	return 0;
5829}
5830
5831static int binder_flush(struct file *filp, fl_owner_t id)
5832{
5833	struct binder_proc *proc = filp->private_data;
5834
5835	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5836
5837	return 0;
5838}
5839
5840static void binder_deferred_flush(struct binder_proc *proc)
5841{
5842	struct rb_node *n;
5843	int wake_count = 0;
5844
5845	binder_inner_proc_lock(proc);
5846	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5847		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5848
5849		thread->looper_need_return = true;
5850		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5851			wake_up_interruptible(&thread->wait);
5852			wake_count++;
5853		}
5854	}
5855	binder_inner_proc_unlock(proc);
5856
5857	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5858		     "binder_flush: %d woke %d threads\n", proc->pid,
5859		     wake_count);
5860}
5861
5862static int binder_release(struct inode *nodp, struct file *filp)
5863{
5864	struct binder_proc *proc = filp->private_data;
5865
5866	debugfs_remove(proc->debugfs_entry);
5867
5868	if (proc->binderfs_entry) {
5869		binderfs_remove_file(proc->binderfs_entry);
5870		proc->binderfs_entry = NULL;
5871	}
5872
5873	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5874
5875	return 0;
5876}
5877
5878static int binder_node_release(struct binder_node *node, int refs)
5879{
5880	struct binder_ref *ref;
5881	int death = 0;
5882	struct binder_proc *proc = node->proc;
5883
5884	binder_release_work(proc, &node->async_todo);
5885
5886	binder_node_lock(node);
5887	binder_inner_proc_lock(proc);
5888	binder_dequeue_work_ilocked(&node->work);
5889	/*
5890	 * The caller must have taken a temporary ref on the node,
5891	 */
5892	BUG_ON(!node->tmp_refs);
5893	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5894		binder_inner_proc_unlock(proc);
5895		binder_node_unlock(node);
5896		binder_free_node(node);
5897
5898		return refs;
5899	}
5900
5901	node->proc = NULL;
5902	node->local_strong_refs = 0;
5903	node->local_weak_refs = 0;
5904	binder_inner_proc_unlock(proc);
5905
5906	spin_lock(&binder_dead_nodes_lock);
5907	hlist_add_head(&node->dead_node, &binder_dead_nodes);
5908	spin_unlock(&binder_dead_nodes_lock);
5909
5910	hlist_for_each_entry(ref, &node->refs, node_entry) {
5911		refs++;
5912		/*
5913		 * Need the node lock to synchronize
5914		 * with new notification requests and the
5915		 * inner lock to synchronize with queued
5916		 * death notifications.
5917		 */
5918		binder_inner_proc_lock(ref->proc);
5919		if (!ref->death) {
5920			binder_inner_proc_unlock(ref->proc);
5921			continue;
5922		}
5923
5924		death++;
5925
5926		BUG_ON(!list_empty(&ref->death->work.entry));
5927		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5928		binder_enqueue_work_ilocked(&ref->death->work,
5929					    &ref->proc->todo);
5930		binder_wakeup_proc_ilocked(ref->proc);
5931		binder_inner_proc_unlock(ref->proc);
5932	}
5933
5934	binder_debug(BINDER_DEBUG_DEAD_BINDER,
5935		     "node %d now dead, refs %d, death %d\n",
5936		     node->debug_id, refs, death);
5937	binder_node_unlock(node);
5938	binder_put_node(node);
5939
5940	return refs;
5941}
5942
5943static void binder_deferred_release(struct binder_proc *proc)
5944{
5945	struct binder_context *context = proc->context;
5946	struct rb_node *n;
5947	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5948
5949	mutex_lock(&binder_procs_lock);
5950	hlist_del(&proc->proc_node);
5951	mutex_unlock(&binder_procs_lock);
5952
5953	mutex_lock(&context->context_mgr_node_lock);
5954	if (context->binder_context_mgr_node &&
5955	    context->binder_context_mgr_node->proc == proc) {
5956		binder_debug(BINDER_DEBUG_DEAD_BINDER,
5957			     "%s: %d context_mgr_node gone\n",
5958			     __func__, proc->pid);
5959		context->binder_context_mgr_node = NULL;
5960	}
5961	mutex_unlock(&context->context_mgr_node_lock);
5962	binder_inner_proc_lock(proc);
5963	/*
5964	 * Make sure proc stays alive after we
5965	 * remove all the threads
5966	 */
5967	proc->tmp_ref++;
5968
5969	proc->is_dead = true;
5970	threads = 0;
5971	active_transactions = 0;
5972	while ((n = rb_first(&proc->threads))) {
5973		struct binder_thread *thread;
5974
5975		thread = rb_entry(n, struct binder_thread, rb_node);
5976		binder_inner_proc_unlock(proc);
5977		threads++;
5978		active_transactions += binder_thread_release(proc, thread);
5979		binder_inner_proc_lock(proc);
5980	}
5981
5982	nodes = 0;
5983	incoming_refs = 0;
5984	while ((n = rb_first(&proc->nodes))) {
5985		struct binder_node *node;
5986
5987		node = rb_entry(n, struct binder_node, rb_node);
5988		nodes++;
5989		/*
5990		 * take a temporary ref on the node before
5991		 * calling binder_node_release() which will either
5992		 * kfree() the node or call binder_put_node()
5993		 */
5994		binder_inc_node_tmpref_ilocked(node);
5995		rb_erase(&node->rb_node, &proc->nodes);
5996		binder_inner_proc_unlock(proc);
5997		incoming_refs = binder_node_release(node, incoming_refs);
5998		binder_inner_proc_lock(proc);
5999	}
6000	binder_inner_proc_unlock(proc);
6001
6002	outgoing_refs = 0;
6003	binder_proc_lock(proc);
6004	while ((n = rb_first(&proc->refs_by_desc))) {
6005		struct binder_ref *ref;
6006
6007		ref = rb_entry(n, struct binder_ref, rb_node_desc);
6008		outgoing_refs++;
6009		binder_cleanup_ref_olocked(ref);
6010		binder_proc_unlock(proc);
6011		binder_free_ref(ref);
6012		binder_proc_lock(proc);
6013	}
6014	binder_proc_unlock(proc);
6015
6016	binder_release_work(proc, &proc->todo);
6017	binder_release_work(proc, &proc->delivered_death);
6018
6019	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
6020		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
6021		     __func__, proc->pid, threads, nodes, incoming_refs,
6022		     outgoing_refs, active_transactions);
6023
6024	binder_proc_dec_tmpref(proc);
6025}
6026
6027static void binder_deferred_func(struct work_struct *work)
6028{
6029	struct binder_proc *proc;
6030
6031	int defer;
6032
6033	do {
6034		mutex_lock(&binder_deferred_lock);
6035		if (!hlist_empty(&binder_deferred_list)) {
6036			proc = hlist_entry(binder_deferred_list.first,
6037					struct binder_proc, deferred_work_node);
6038			hlist_del_init(&proc->deferred_work_node);
6039			defer = proc->deferred_work;
6040			proc->deferred_work = 0;
6041		} else {
6042			proc = NULL;
6043			defer = 0;
6044		}
6045		mutex_unlock(&binder_deferred_lock);
6046
6047		if (defer & BINDER_DEFERRED_FLUSH)
6048			binder_deferred_flush(proc);
6049
6050		if (defer & BINDER_DEFERRED_RELEASE)
6051			binder_deferred_release(proc); /* frees proc */
6052	} while (proc);
6053}
6054static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
6055
6056static void
6057binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
6058{
6059	mutex_lock(&binder_deferred_lock);
6060	proc->deferred_work |= defer;
6061	if (hlist_unhashed(&proc->deferred_work_node)) {
6062		hlist_add_head(&proc->deferred_work_node,
6063				&binder_deferred_list);
6064		schedule_work(&binder_deferred_work);
6065	}
6066	mutex_unlock(&binder_deferred_lock);
6067}
6068
6069static void print_binder_transaction_ilocked(struct seq_file *m,
6070					     struct binder_proc *proc,
6071					     const char *prefix,
6072					     struct binder_transaction *t)
6073{
6074	struct binder_proc *to_proc;
6075	struct binder_buffer *buffer = t->buffer;
6076
6077	spin_lock(&t->lock);
6078	to_proc = t->to_proc;
6079	seq_printf(m,
6080		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
6081		   prefix, t->debug_id, t,
6082		   t->from ? t->from->proc->pid : 0,
6083		   t->from ? t->from->pid : 0,
6084		   to_proc ? to_proc->pid : 0,
6085		   t->to_thread ? t->to_thread->pid : 0,
6086		   t->code, t->flags, t->priority, t->need_reply);
6087	spin_unlock(&t->lock);
6088
6089	if (proc != to_proc) {
6090		/*
6091		 * Can only safely deref buffer if we are holding the
6092		 * correct proc inner lock for this node
6093		 */
6094		seq_puts(m, "\n");
6095		return;
6096	}
6097
6098	if (buffer == NULL) {
6099		seq_puts(m, " buffer free\n");
6100		return;
6101	}
6102	if (buffer->target_node)
6103		seq_printf(m, " node %d", buffer->target_node->debug_id);
6104	seq_printf(m, " size %zd:%zd data %pK\n",
6105		   buffer->data_size, buffer->offsets_size,
6106		   buffer->user_data);
6107}
6108
6109static void print_binder_work_ilocked(struct seq_file *m,
6110				     struct binder_proc *proc,
6111				     const char *prefix,
6112				     const char *transaction_prefix,
6113				     struct binder_work *w)
6114{
6115	struct binder_node *node;
6116	struct binder_transaction *t;
6117
6118	switch (w->type) {
6119	case BINDER_WORK_TRANSACTION:
6120		t = container_of(w, struct binder_transaction, work);
6121		print_binder_transaction_ilocked(
6122				m, proc, transaction_prefix, t);
6123		break;
6124	case BINDER_WORK_RETURN_ERROR: {
6125		struct binder_error *e = container_of(
6126				w, struct binder_error, work);
6127
6128		seq_printf(m, "%stransaction error: %u\n",
6129			   prefix, e->cmd);
6130	} break;
6131	case BINDER_WORK_TRANSACTION_COMPLETE:
6132		seq_printf(m, "%stransaction complete\n", prefix);
6133		break;
6134	case BINDER_WORK_NODE:
6135		node = container_of(w, struct binder_node, work);
6136		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
6137			   prefix, node->debug_id,
6138			   (u64)node->ptr, (u64)node->cookie);
6139		break;
6140	case BINDER_WORK_DEAD_BINDER:
6141		seq_printf(m, "%shas dead binder\n", prefix);
6142		break;
6143	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
6144		seq_printf(m, "%shas cleared dead binder\n", prefix);
6145		break;
6146	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
6147		seq_printf(m, "%shas cleared death notification\n", prefix);
6148		break;
6149	default:
6150		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
6151		break;
6152	}
6153}
6154
6155static void print_binder_thread_ilocked(struct seq_file *m,
6156					struct binder_thread *thread,
6157					int print_always)
6158{
6159	struct binder_transaction *t;
6160	struct binder_work *w;
6161	size_t start_pos = m->count;
6162	size_t header_pos;
6163
6164	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
6165			thread->pid, thread->looper,
6166			thread->looper_need_return,
6167			atomic_read(&thread->tmp_ref));
6168	header_pos = m->count;
6169	t = thread->transaction_stack;
6170	while (t) {
6171		if (t->from == thread) {
6172			print_binder_transaction_ilocked(m, thread->proc,
6173					"    outgoing transaction", t);
6174			t = t->from_parent;
6175		} else if (t->to_thread == thread) {
6176			print_binder_transaction_ilocked(m, thread->proc,
6177						 "    incoming transaction", t);
6178			t = t->to_parent;
6179		} else {
6180			print_binder_transaction_ilocked(m, thread->proc,
6181					"    bad transaction", t);
6182			t = NULL;
6183		}
6184	}
6185	list_for_each_entry(w, &thread->todo, entry) {
6186		print_binder_work_ilocked(m, thread->proc, "    ",
6187					  "    pending transaction", w);
6188	}
6189	if (!print_always && m->count == header_pos)
6190		m->count = start_pos;
6191}
6192
6193static void print_binder_node_nilocked(struct seq_file *m,
6194				       struct binder_node *node)
6195{
6196	struct binder_ref *ref;
6197	struct binder_work *w;
6198	int count;
6199
6200	count = 0;
6201	hlist_for_each_entry(ref, &node->refs, node_entry)
6202		count++;
6203
6204	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
6205		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
6206		   node->has_strong_ref, node->has_weak_ref,
6207		   node->local_strong_refs, node->local_weak_refs,
6208		   node->internal_strong_refs, count, node->tmp_refs);
6209	if (count) {
6210		seq_puts(m, " proc");
6211		hlist_for_each_entry(ref, &node->refs, node_entry)
6212			seq_printf(m, " %d", ref->proc->pid);
6213	}
6214	seq_puts(m, "\n");
6215	if (node->proc) {
6216		list_for_each_entry(w, &node->async_todo, entry)
6217			print_binder_work_ilocked(m, node->proc, "    ",
6218					  "    pending async transaction", w);
6219	}
6220}
6221
6222static void print_binder_ref_olocked(struct seq_file *m,
6223				     struct binder_ref *ref)
6224{
6225	binder_node_lock(ref->node);
6226	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
6227		   ref->data.debug_id, ref->data.desc,
6228		   ref->node->proc ? "" : "dead ",
6229		   ref->node->debug_id, ref->data.strong,
6230		   ref->data.weak, ref->death);
6231	binder_node_unlock(ref->node);
6232}
6233
6234static void print_binder_proc(struct seq_file *m,
6235			      struct binder_proc *proc, int print_all)
6236{
6237	struct binder_work *w;
6238	struct rb_node *n;
6239	size_t start_pos = m->count;
6240	size_t header_pos;
6241	struct binder_node *last_node = NULL;
6242
6243	seq_printf(m, "proc %d\n", proc->pid);
6244	seq_printf(m, "context %s\n", proc->context->name);
6245	header_pos = m->count;
6246
6247	binder_inner_proc_lock(proc);
6248	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6249		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
6250						rb_node), print_all);
6251
6252	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6253		struct binder_node *node = rb_entry(n, struct binder_node,
6254						    rb_node);
6255		if (!print_all && !node->has_async_transaction)
6256			continue;
6257
6258		/*
6259		 * take a temporary reference on the node so it
6260		 * survives and isn't removed from the tree
6261		 * while we print it.
6262		 */
6263		binder_inc_node_tmpref_ilocked(node);
6264		/* Need to drop inner lock to take node lock */
6265		binder_inner_proc_unlock(proc);
6266		if (last_node)
6267			binder_put_node(last_node);
6268		binder_node_inner_lock(node);
6269		print_binder_node_nilocked(m, node);
6270		binder_node_inner_unlock(node);
6271		last_node = node;
6272		binder_inner_proc_lock(proc);
6273	}
6274	binder_inner_proc_unlock(proc);
6275	if (last_node)
6276		binder_put_node(last_node);
6277
6278	if (print_all) {
6279		binder_proc_lock(proc);
6280		for (n = rb_first(&proc->refs_by_desc);
6281		     n != NULL;
6282		     n = rb_next(n))
6283			print_binder_ref_olocked(m, rb_entry(n,
6284							    struct binder_ref,
6285							    rb_node_desc));
6286		binder_proc_unlock(proc);
6287	}
6288	binder_alloc_print_allocated(m, &proc->alloc);
6289	binder_inner_proc_lock(proc);
6290	list_for_each_entry(w, &proc->todo, entry)
6291		print_binder_work_ilocked(m, proc, "  ",
6292					  "  pending transaction", w);
6293	list_for_each_entry(w, &proc->delivered_death, entry) {
6294		seq_puts(m, "  has delivered dead binder\n");
6295		break;
6296	}
6297	binder_inner_proc_unlock(proc);
6298	if (!print_all && m->count == header_pos)
6299		m->count = start_pos;
6300}
6301
6302static const char * const binder_return_strings[] = {
6303	"BR_ERROR",
6304	"BR_OK",
6305	"BR_TRANSACTION",
6306	"BR_REPLY",
6307	"BR_ACQUIRE_RESULT",
6308	"BR_DEAD_REPLY",
6309	"BR_TRANSACTION_COMPLETE",
6310	"BR_INCREFS",
6311	"BR_ACQUIRE",
6312	"BR_RELEASE",
6313	"BR_DECREFS",
6314	"BR_ATTEMPT_ACQUIRE",
6315	"BR_NOOP",
6316	"BR_SPAWN_LOOPER",
6317	"BR_FINISHED",
6318	"BR_DEAD_BINDER",
6319	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
6320	"BR_FAILED_REPLY"
6321};
6322
6323static const char * const binder_command_strings[] = {
6324	"BC_TRANSACTION",
6325	"BC_REPLY",
6326	"BC_ACQUIRE_RESULT",
6327	"BC_FREE_BUFFER",
6328	"BC_INCREFS",
6329	"BC_ACQUIRE",
6330	"BC_RELEASE",
6331	"BC_DECREFS",
6332	"BC_INCREFS_DONE",
6333	"BC_ACQUIRE_DONE",
6334	"BC_ATTEMPT_ACQUIRE",
6335	"BC_REGISTER_LOOPER",
6336	"BC_ENTER_LOOPER",
6337	"BC_EXIT_LOOPER",
6338	"BC_REQUEST_DEATH_NOTIFICATION",
6339	"BC_CLEAR_DEATH_NOTIFICATION",
6340	"BC_DEAD_BINDER_DONE",
6341	"BC_TRANSACTION_SG",
6342	"BC_REPLY_SG",
6343};
6344
6345static const char * const binder_objstat_strings[] = {
6346	"proc",
6347	"thread",
6348	"node",
6349	"ref",
6350	"death",
6351	"transaction",
6352	"transaction_complete"
6353};
6354
6355static void print_binder_stats(struct seq_file *m, const char *prefix,
6356			       struct binder_stats *stats)
6357{
6358	int i;
6359
6360	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6361		     ARRAY_SIZE(binder_command_strings));
6362	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6363		int temp = atomic_read(&stats->bc[i]);
6364
6365		if (temp)
6366			seq_printf(m, "%s%s: %d\n", prefix,
6367				   binder_command_strings[i], temp);
6368	}
6369
6370	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6371		     ARRAY_SIZE(binder_return_strings));
6372	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6373		int temp = atomic_read(&stats->br[i]);
6374
6375		if (temp)
6376			seq_printf(m, "%s%s: %d\n", prefix,
6377				   binder_return_strings[i], temp);
6378	}
6379
6380	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6381		     ARRAY_SIZE(binder_objstat_strings));
6382	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6383		     ARRAY_SIZE(stats->obj_deleted));
6384	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6385		int created = atomic_read(&stats->obj_created[i]);
6386		int deleted = atomic_read(&stats->obj_deleted[i]);
6387
6388		if (created || deleted)
6389			seq_printf(m, "%s%s: active %d total %d\n",
6390				prefix,
6391				binder_objstat_strings[i],
6392				created - deleted,
6393				created);
6394	}
6395}
6396
6397static void print_binder_proc_stats(struct seq_file *m,
6398				    struct binder_proc *proc)
6399{
6400	struct binder_work *w;
6401	struct binder_thread *thread;
6402	struct rb_node *n;
6403	int count, strong, weak, ready_threads;
6404	size_t free_async_space =
6405		binder_alloc_get_free_async_space(&proc->alloc);
6406
6407	seq_printf(m, "proc %d\n", proc->pid);
6408	seq_printf(m, "context %s\n", proc->context->name);
6409	count = 0;
6410	ready_threads = 0;
6411	binder_inner_proc_lock(proc);
6412	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6413		count++;
6414
6415	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6416		ready_threads++;
6417
6418	seq_printf(m, "  threads: %d\n", count);
6419	seq_printf(m, "  requested threads: %d+%d/%d\n"
6420			"  ready threads %d\n"
6421			"  free async space %zd\n", proc->requested_threads,
6422			proc->requested_threads_started, proc->max_threads,
6423			ready_threads,
6424			free_async_space);
6425	count = 0;
6426	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6427		count++;
6428	binder_inner_proc_unlock(proc);
6429	seq_printf(m, "  nodes: %d\n", count);
6430	count = 0;
6431	strong = 0;
6432	weak = 0;
6433	binder_proc_lock(proc);
6434	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6435		struct binder_ref *ref = rb_entry(n, struct binder_ref,
6436						  rb_node_desc);
6437		count++;
6438		strong += ref->data.strong;
6439		weak += ref->data.weak;
6440	}
6441	binder_proc_unlock(proc);
6442	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
6443
6444	count = binder_alloc_get_allocated_count(&proc->alloc);
6445	seq_printf(m, "  buffers: %d\n", count);
6446
6447	binder_alloc_print_pages(m, &proc->alloc);
6448
6449	count = 0;
6450	binder_inner_proc_lock(proc);
6451	list_for_each_entry(w, &proc->todo, entry) {
6452		if (w->type == BINDER_WORK_TRANSACTION)
6453			count++;
6454	}
6455	binder_inner_proc_unlock(proc);
6456	seq_printf(m, "  pending transactions: %d\n", count);
6457
6458	print_binder_stats(m, "  ", &proc->stats);
6459}
6460
6461
6462int binder_state_show(struct seq_file *m, void *unused)
6463{
6464	struct binder_proc *proc;
6465	struct binder_node *node;
6466	struct binder_node *last_node = NULL;
6467
6468	seq_puts(m, "binder state:\n");
6469
6470	spin_lock(&binder_dead_nodes_lock);
6471	if (!hlist_empty(&binder_dead_nodes))
6472		seq_puts(m, "dead nodes:\n");
6473	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6474		/*
6475		 * take a temporary reference on the node so it
6476		 * survives and isn't removed from the list
6477		 * while we print it.
6478		 */
6479		node->tmp_refs++;
6480		spin_unlock(&binder_dead_nodes_lock);
6481		if (last_node)
6482			binder_put_node(last_node);
6483		binder_node_lock(node);
6484		print_binder_node_nilocked(m, node);
6485		binder_node_unlock(node);
6486		last_node = node;
6487		spin_lock(&binder_dead_nodes_lock);
6488	}
6489	spin_unlock(&binder_dead_nodes_lock);
6490	if (last_node)
6491		binder_put_node(last_node);
6492
6493	mutex_lock(&binder_procs_lock);
6494	hlist_for_each_entry(proc, &binder_procs, proc_node)
6495		print_binder_proc(m, proc, 1);
6496	mutex_unlock(&binder_procs_lock);
6497
6498	return 0;
6499}
6500
6501int binder_stats_show(struct seq_file *m, void *unused)
6502{
6503	struct binder_proc *proc;
6504
6505	seq_puts(m, "binder stats:\n");
6506
6507	print_binder_stats(m, "", &binder_stats);
6508
6509	mutex_lock(&binder_procs_lock);
6510	hlist_for_each_entry(proc, &binder_procs, proc_node)
6511		print_binder_proc_stats(m, proc);
6512	mutex_unlock(&binder_procs_lock);
6513
6514	return 0;
6515}
6516
6517int binder_transactions_show(struct seq_file *m, void *unused)
6518{
6519	struct binder_proc *proc;
6520
6521	seq_puts(m, "binder transactions:\n");
6522	mutex_lock(&binder_procs_lock);
6523	hlist_for_each_entry(proc, &binder_procs, proc_node)
6524		print_binder_proc(m, proc, 0);
6525	mutex_unlock(&binder_procs_lock);
6526
6527	return 0;
6528}
6529
6530static int proc_show(struct seq_file *m, void *unused)
6531{
6532	struct binder_proc *itr;
6533	int pid = (unsigned long)m->private;
6534
6535	mutex_lock(&binder_procs_lock);
6536	hlist_for_each_entry(itr, &binder_procs, proc_node) {
6537		if (itr->pid == pid) {
6538			seq_puts(m, "binder proc state:\n");
6539			print_binder_proc(m, itr, 1);
6540		}
6541	}
6542	mutex_unlock(&binder_procs_lock);
6543
6544	return 0;
6545}
6546
6547static void print_binder_transaction_log_entry(struct seq_file *m,
6548					struct binder_transaction_log_entry *e)
6549{
6550	int debug_id = READ_ONCE(e->debug_id_done);
6551	/*
6552	 * read barrier to guarantee debug_id_done read before
6553	 * we print the log values
6554	 */
6555	smp_rmb();
6556	seq_printf(m,
6557		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6558		   e->debug_id, (e->call_type == 2) ? "reply" :
6559		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6560		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
6561		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
6562		   e->return_error, e->return_error_param,
6563		   e->return_error_line);
6564	/*
6565	 * read-barrier to guarantee read of debug_id_done after
6566	 * done printing the fields of the entry
6567	 */
6568	smp_rmb();
6569	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6570			"\n" : " (incomplete)\n");
6571}
6572
6573int binder_transaction_log_show(struct seq_file *m, void *unused)
6574{
6575	struct binder_transaction_log *log = m->private;
6576	unsigned int log_cur = atomic_read(&log->cur);
6577	unsigned int count;
6578	unsigned int cur;
6579	int i;
6580
6581	count = log_cur + 1;
6582	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6583		0 : count % ARRAY_SIZE(log->entry);
6584	if (count > ARRAY_SIZE(log->entry) || log->full)
6585		count = ARRAY_SIZE(log->entry);
6586	for (i = 0; i < count; i++) {
6587		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6588
6589		print_binder_transaction_log_entry(m, &log->entry[index]);
6590	}
6591	return 0;
6592}
6593
6594const struct file_operations binder_fops = {
6595	.owner = THIS_MODULE,
6596	.poll = binder_poll,
6597	.unlocked_ioctl = binder_ioctl,
6598	.compat_ioctl = compat_ptr_ioctl,
6599	.mmap = binder_mmap,
6600	.open = binder_open,
6601	.flush = binder_flush,
6602	.release = binder_release,
6603	.may_pollfree = true,
6604};
6605
6606#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
6607static void print_binder_transaction_brief_ilocked(
6608				struct seq_file *m,
6609				const char *prefix, struct binder_transaction *t,
6610				u64 timestamp)
6611{
6612	struct binder_proc *to_proc = NULL;
6613	int from_pid = 0;
6614	int from_tid = 0;
6615	int to_pid = 0;
6616	u64 sec;
6617	u32 nsec;
6618
6619	spin_lock(&t->lock);
6620	to_proc = t->to_proc;
6621	from_pid = t->from ? (t->from->proc ? t->from->proc->pid : 0) : t->async_from_pid;
6622	from_tid = t->from ? t->from->pid : t->async_from_tid;
6623	to_pid = to_proc ? to_proc->pid : 0;
6624	sec = div_u64_rem((timestamp - t->timestamp), 1000000000, &nsec);
6625
6626	seq_printf(m,
6627		   "%s%d:%d to %d:%d code %x wait:%llu.%u s\n",
6628		   prefix,
6629		   from_pid, from_tid,
6630		   to_pid, t->to_thread ? t->to_thread->pid : 0,
6631		   t->code,
6632		   timestamp > t->timestamp ? sec : 0,
6633		   timestamp > t->timestamp ? nsec : 0);
6634	spin_unlock(&t->lock);
6635}
6636
6637static void print_binder_work_transaction_nilocked(struct seq_file *m,
6638				const char *prefix, struct binder_work *w,
6639				u64 timestamp)
6640{
6641	struct binder_transaction *t = NULL;
6642
6643	switch (w->type) {
6644	case BINDER_WORK_TRANSACTION:
6645		t = container_of(w, struct binder_transaction, work);
6646		print_binder_transaction_brief_ilocked(m, prefix, t, timestamp);
6647		break;
6648
6649	default:
6650		break;
6651	}
6652}
6653
6654static void print_binder_transaction_brief(struct seq_file *m,
6655				struct binder_proc *proc,
6656				u64 timestamp)
6657{
6658	struct binder_work *w = NULL;
6659	struct rb_node *n = NULL;
6660	struct binder_node *last_node = NULL;
6661	size_t start_pos = m->count;
6662	size_t header_pos = m->count;
6663
6664	/* sync binder / not one way */
6665	binder_inner_proc_lock(proc);
6666	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
6667		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
6668		struct binder_transaction *t = thread->transaction_stack;
6669		while (t) {
6670			if (t->from == thread) {
6671				print_binder_transaction_brief_ilocked(m, "\t", t, timestamp);
6672				t = t->from_parent;
6673			} else if (t->to_thread == thread) {
6674				t = t->to_parent;
6675			} else {
6676				t = NULL;
6677			}
6678		}
6679	}
6680
6681	/* async binder / one way */
6682	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
6683		struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
6684		/*
6685		 * take a temporary reference on the node so it
6686		 * survives and isn't removed from the tree
6687		 * while we print it.
6688		 */
6689		binder_inc_node_tmpref_ilocked(node);
6690		/* Need to drop inner lock to take node lock */
6691		binder_inner_proc_unlock(proc);
6692		if (last_node)
6693			binder_put_node(last_node);
6694		binder_node_inner_lock(node);
6695		list_for_each_entry(w, &node->async_todo, entry)
6696			print_binder_work_transaction_nilocked(m, "async\t", w, timestamp);
6697		binder_node_inner_unlock(node);
6698		last_node = node;
6699		binder_inner_proc_lock(proc);
6700	}
6701	binder_inner_proc_unlock(proc);
6702
6703	if (last_node)
6704		binder_put_node(last_node);
6705
6706	if (m->count == header_pos)
6707		m->count = start_pos;
6708}
6709
6710static void print_binder_proc_brief(struct seq_file *m,
6711				struct binder_proc *proc)
6712{
6713	struct binder_thread *thread = NULL;
6714	int ready_threads = 0;
6715	size_t free_async_space = binder_alloc_get_free_async_space(&proc->alloc);
6716
6717	seq_printf(m, "%d\t", proc->pid);
6718	seq_printf(m, "%s\t", proc->context->name);
6719
6720	binder_inner_proc_lock(proc);
6721	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6722		ready_threads++;
6723
6724	seq_printf(m, "%d\t%d\t%d\t%d"
6725			"\t%zd\n", proc->requested_threads,
6726			proc->requested_threads_started, proc->max_threads,
6727			ready_threads,
6728			free_async_space);
6729	binder_inner_proc_unlock(proc);
6730}
6731
6732static int binder_transaction_proc_show(struct seq_file *m, void *unused)
6733{
6734	struct binder_proc *proc = NULL;
6735	u64 now = 0;
6736
6737	mutex_lock(&binder_procs_lock);
6738	now = binder_clock();
6739	hlist_for_each_entry(proc, &binder_procs, proc_node)
6740		print_binder_transaction_brief(m, proc, now);
6741
6742	seq_printf(m, "\npid\tcontext\t\trequest\tstarted\tmax\tready\tfree_async_space\n");
6743	hlist_for_each_entry(proc, &binder_procs, proc_node)
6744		print_binder_proc_brief(m, proc);
6745	mutex_unlock(&binder_procs_lock);
6746
6747	return 0;
6748}
6749
6750#endif
6751
6752static int __init init_binder_device(const char *name)
6753{
6754	int ret;
6755	struct binder_device *binder_device;
6756
6757	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6758	if (!binder_device)
6759		return -ENOMEM;
6760
6761	binder_device->miscdev.fops = &binder_fops;
6762	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6763	binder_device->miscdev.name = name;
6764
6765	refcount_set(&binder_device->ref, 1);
6766	binder_device->context.binder_context_mgr_uid = INVALID_UID;
6767	binder_device->context.name = name;
6768	mutex_init(&binder_device->context.context_mgr_node_lock);
6769
6770	ret = misc_register(&binder_device->miscdev);
6771	if (ret < 0) {
6772		kfree(binder_device);
6773		return ret;
6774	}
6775
6776	hlist_add_head(&binder_device->hlist, &binder_devices);
6777
6778	return ret;
6779}
6780
6781static int __init binder_init(void)
6782{
6783	int ret;
6784	char *device_name, *device_tmp;
6785	struct binder_device *device;
6786	struct hlist_node *tmp;
6787	char *device_names = NULL;
6788
6789	ret = binder_alloc_shrinker_init();
6790	if (ret)
6791		return ret;
6792
6793	atomic_set(&binder_transaction_log.cur, ~0U);
6794	atomic_set(&binder_transaction_log_failed.cur, ~0U);
6795
6796	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6797	if (binder_debugfs_dir_entry_root)
6798		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6799						 binder_debugfs_dir_entry_root);
6800
6801	if (binder_debugfs_dir_entry_root) {
6802		debugfs_create_file("state",
6803				    0444,
6804				    binder_debugfs_dir_entry_root,
6805				    NULL,
6806				    &binder_state_fops);
6807		debugfs_create_file("stats",
6808				    0444,
6809				    binder_debugfs_dir_entry_root,
6810				    NULL,
6811				    &binder_stats_fops);
6812		debugfs_create_file("transactions",
6813				    0444,
6814				    binder_debugfs_dir_entry_root,
6815				    NULL,
6816				    &binder_transactions_fops);
6817		debugfs_create_file("transaction_log",
6818				    0444,
6819				    binder_debugfs_dir_entry_root,
6820				    &binder_transaction_log,
6821				    &binder_transaction_log_fops);
6822		debugfs_create_file("failed_transaction_log",
6823				    0444,
6824				    binder_debugfs_dir_entry_root,
6825				    &binder_transaction_log_failed,
6826				    &binder_transaction_log_fops);
6827#ifdef CONFIG_BINDER_TRANSACTION_PROC_BRIEF
6828		proc_create_data("transaction_proc",
6829				 S_IRUGO,
6830				 NULL,
6831				 &binder_transaction_proc_proc_ops,
6832				 NULL);
6833#endif
6834	}
6835
6836	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6837	    strcmp(binder_devices_param, "") != 0) {
6838		/*
6839		* Copy the module_parameter string, because we don't want to
6840		* tokenize it in-place.
6841		 */
6842		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6843		if (!device_names) {
6844			ret = -ENOMEM;
6845			goto err_alloc_device_names_failed;
6846		}
6847
6848		device_tmp = device_names;
6849		while ((device_name = strsep(&device_tmp, ","))) {
6850			ret = init_binder_device(device_name);
6851			if (ret)
6852				goto err_init_binder_device_failed;
6853		}
6854	}
6855
6856	ret = init_binderfs();
6857	if (ret)
6858		goto err_init_binder_device_failed;
6859
6860	return ret;
6861
6862err_init_binder_device_failed:
6863	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6864		misc_deregister(&device->miscdev);
6865		hlist_del(&device->hlist);
6866		kfree(device);
6867	}
6868
6869	kfree(device_names);
6870
6871err_alloc_device_names_failed:
6872	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6873	binder_alloc_shrinker_exit();
6874
6875	return ret;
6876}
6877
6878device_initcall(binder_init);
6879
6880#define CREATE_TRACE_POINTS
6881#include "binder_trace.h"
6882
6883MODULE_LICENSE("GPL v2");
6884