cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

binder.c (186216B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/* binder.c
      3 *
      4 * Android IPC Subsystem
      5 *
      6 * Copyright (C) 2007-2008 Google, Inc.
      7 */
      8
      9/*
     10 * Locking overview
     11 *
     12 * There are 3 main spinlocks which must be acquired in the
     13 * order shown:
     14 *
     15 * 1) proc->outer_lock : protects binder_ref
     16 *    binder_proc_lock() and binder_proc_unlock() are
     17 *    used to acq/rel.
     18 * 2) node->lock : protects most fields of binder_node.
     19 *    binder_node_lock() and binder_node_unlock() are
     20 *    used to acq/rel
     21 * 3) proc->inner_lock : protects the thread and node lists
     22 *    (proc->threads, proc->waiting_threads, proc->nodes)
     23 *    and all todo lists associated with the binder_proc
     24 *    (proc->todo, thread->todo, proc->delivered_death and
     25 *    node->async_todo), as well as thread->transaction_stack
     26 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
     27 *    are used to acq/rel
     28 *
     29 * Any lock under procA must never be nested under any lock at the same
     30 * level or below on procB.
     31 *
      32 * Functions that require a lock to be held on entry indicate the
      33 * required lock in the suffix of the function name:
     34 *
     35 * foo_olocked() : requires node->outer_lock
     36 * foo_nlocked() : requires node->lock
     37 * foo_ilocked() : requires proc->inner_lock
     38 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
     39 * foo_nilocked(): requires node->lock and proc->inner_lock
     40 * ...
     41 */
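/*
 * Illustrative sketch: under the ordering rules above, code that needs
 * all three locks for a single proc would nest them as follows:
 *
 *	binder_proc_lock(proc);		(1) proc->outer_lock
 *	binder_node_lock(node);		(2) node->lock
 *	binder_inner_proc_lock(proc);	(3) proc->inner_lock
 *	... critical section ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */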
     42
     43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     44
     45#include <linux/fdtable.h>
     46#include <linux/file.h>
     47#include <linux/freezer.h>
     48#include <linux/fs.h>
     49#include <linux/list.h>
     50#include <linux/miscdevice.h>
     51#include <linux/module.h>
     52#include <linux/mutex.h>
     53#include <linux/nsproxy.h>
     54#include <linux/poll.h>
     55#include <linux/debugfs.h>
     56#include <linux/rbtree.h>
     57#include <linux/sched/signal.h>
     58#include <linux/sched/mm.h>
     59#include <linux/seq_file.h>
     60#include <linux/string.h>
     61#include <linux/uaccess.h>
     62#include <linux/pid_namespace.h>
     63#include <linux/security.h>
     64#include <linux/spinlock.h>
     65#include <linux/ratelimit.h>
     66#include <linux/syscalls.h>
     67#include <linux/task_work.h>
     68#include <linux/sizes.h>
     69
     70#include <uapi/linux/android/binder.h>
     71
     72#include <linux/cacheflush.h>
     73
     74#include "binder_internal.h"
     75#include "binder_trace.h"
     76
     77static HLIST_HEAD(binder_deferred_list);
     78static DEFINE_MUTEX(binder_deferred_lock);
     79
     80static HLIST_HEAD(binder_devices);
     81static HLIST_HEAD(binder_procs);
     82static DEFINE_MUTEX(binder_procs_lock);
     83
     84static HLIST_HEAD(binder_dead_nodes);
     85static DEFINE_SPINLOCK(binder_dead_nodes_lock);
     86
     87static struct dentry *binder_debugfs_dir_entry_root;
     88static struct dentry *binder_debugfs_dir_entry_proc;
     89static atomic_t binder_last_id;
     90
     91static int proc_show(struct seq_file *m, void *unused);
     92DEFINE_SHOW_ATTRIBUTE(proc);
     93
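/*
 * binder_mmap() rejects mappings with any of these flags set:
 * userspace must never map binder buffers writable.
 */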
     94#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
     95
     96enum {
     97	BINDER_DEBUG_USER_ERROR             = 1U << 0,
     98	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
     99	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
    100	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
    101	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
    102	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
    103	BINDER_DEBUG_READ_WRITE             = 1U << 6,
    104	BINDER_DEBUG_USER_REFS              = 1U << 7,
    105	BINDER_DEBUG_THREADS                = 1U << 8,
    106	BINDER_DEBUG_TRANSACTION            = 1U << 9,
    107	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
    108	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
    109	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
    110	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
    111	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
    112};
    113static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
    114	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
    115module_param_named(debug_mask, binder_debug_mask, uint, 0644);
    116
    117char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
    118module_param_named(devices, binder_devices_param, charp, 0444);
    119
    120static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
    121static int binder_stop_on_user_error;
    122
    123static int binder_set_stop_on_user_error(const char *val,
    124					 const struct kernel_param *kp)
    125{
    126	int ret;
    127
    128	ret = param_set_int(val, kp);
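	/* a value below 2 means user errors are no longer fatal, so
	 * release any threads blocked on binder_user_error_wait */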
    129	if (binder_stop_on_user_error < 2)
    130		wake_up(&binder_user_error_wait);
    131	return ret;
    132}
    133module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
    134	param_get_int, &binder_stop_on_user_error, 0644);
    135
    136static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
    137{
    138	struct va_format vaf;
    139	va_list args;
    140
    141	if (binder_debug_mask & mask) {
    142		va_start(args, format);
    143		vaf.va = &args;
    144		vaf.fmt = format;
    145		pr_info_ratelimited("%pV", &vaf);
    146		va_end(args);
    147	}
    148}
    149
    150#define binder_txn_error(x...) \
    151	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
    152
    153static __printf(1, 2) void binder_user_error(const char *format, ...)
    154{
    155	struct va_format vaf;
    156	va_list args;
    157
    158	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
    159		va_start(args, format);
    160		vaf.va = &args;
    161		vaf.fmt = format;
    162		pr_info_ratelimited("%pV", &vaf);
    163		va_end(args);
    164	}
    165
    166	if (binder_stop_on_user_error)
    167		binder_stop_on_user_error = 2;
    168}
    169
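/*
 * Wrapped in do { } while (0) so the multi-statement macro can be
 * used safely as a single statement, e.g. in an unbraced if/else.
 */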
    170#define binder_set_extended_error(ee, _id, _command, _param) \
    171	do { \
    172		(ee)->id = _id; \
    173		(ee)->command = _command; \
    174		(ee)->param = _param; \
    175	} while (0)
    176
    177#define to_flat_binder_object(hdr) \
    178	container_of(hdr, struct flat_binder_object, hdr)
    179
    180#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
    181
    182#define to_binder_buffer_object(hdr) \
    183	container_of(hdr, struct binder_buffer_object, hdr)
    184
    185#define to_binder_fd_array_object(hdr) \
    186	container_of(hdr, struct binder_fd_array_object, hdr)
    187
    188static struct binder_stats binder_stats;
    189
    190static inline void binder_stats_deleted(enum binder_stat_types type)
    191{
    192	atomic_inc(&binder_stats.obj_deleted[type]);
    193}
    194
    195static inline void binder_stats_created(enum binder_stat_types type)
    196{
    197	atomic_inc(&binder_stats.obj_created[type]);
    198}
    199
    200struct binder_transaction_log binder_transaction_log;
    201struct binder_transaction_log binder_transaction_log_failed;
    202
    203static struct binder_transaction_log_entry *binder_transaction_log_add(
    204	struct binder_transaction_log *log)
    205{
    206	struct binder_transaction_log_entry *e;
    207	unsigned int cur = atomic_inc_return(&log->cur);
    208
    209	if (cur >= ARRAY_SIZE(log->entry))
    210		log->full = true;
    211	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
    212	WRITE_ONCE(e->debug_id_done, 0);
    213	/*
    214	 * write-barrier to synchronize access to e->debug_id_done.
     215	 * We make sure the initialized 0 value is seen before
     216	 * the other fields are zeroed by memset().
    217	 */
    218	smp_wmb();
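	/* paired with an smp_rmb() on the reader side in the log-dump
	 * path, after loading debug_id_done */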
    219	memset(e, 0, sizeof(*e));
    220	return e;
    221}
    222
    223enum binder_deferred_state {
    224	BINDER_DEFERRED_FLUSH        = 0x01,
    225	BINDER_DEFERRED_RELEASE      = 0x02,
    226};
    227
    228enum {
    229	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
    230	BINDER_LOOPER_STATE_ENTERED     = 0x02,
    231	BINDER_LOOPER_STATE_EXITED      = 0x04,
    232	BINDER_LOOPER_STATE_INVALID     = 0x08,
    233	BINDER_LOOPER_STATE_WAITING     = 0x10,
    234	BINDER_LOOPER_STATE_POLL        = 0x20,
    235};
    236
    237/**
    238 * binder_proc_lock() - Acquire outer lock for given binder_proc
    239 * @proc:         struct binder_proc to acquire
    240 *
    241 * Acquires proc->outer_lock. Used to protect binder_ref
    242 * structures associated with the given proc.
    243 */
    244#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
    245static void
    246_binder_proc_lock(struct binder_proc *proc, int line)
    247	__acquires(&proc->outer_lock)
    248{
    249	binder_debug(BINDER_DEBUG_SPINLOCKS,
    250		     "%s: line=%d\n", __func__, line);
    251	spin_lock(&proc->outer_lock);
    252}
    253
    254/**
     255 * binder_proc_unlock() - Release outer lock for given binder_proc
     256 * @proc:         struct binder_proc being released
    257 *
    258 * Release lock acquired via binder_proc_lock()
    259 */
    260#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
    261static void
    262_binder_proc_unlock(struct binder_proc *proc, int line)
    263	__releases(&proc->outer_lock)
    264{
    265	binder_debug(BINDER_DEBUG_SPINLOCKS,
    266		     "%s: line=%d\n", __func__, line);
    267	spin_unlock(&proc->outer_lock);
    268}
    269
    270/**
    271 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
    272 * @proc:         struct binder_proc to acquire
    273 *
    274 * Acquires proc->inner_lock. Used to protect todo lists
    275 */
    276#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
    277static void
    278_binder_inner_proc_lock(struct binder_proc *proc, int line)
    279	__acquires(&proc->inner_lock)
    280{
    281	binder_debug(BINDER_DEBUG_SPINLOCKS,
    282		     "%s: line=%d\n", __func__, line);
    283	spin_lock(&proc->inner_lock);
    284}
    285
    286/**
    287 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
     288 * @proc:         struct binder_proc being released
    289 *
    290 * Release lock acquired via binder_inner_proc_lock()
    291 */
    292#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
    293static void
    294_binder_inner_proc_unlock(struct binder_proc *proc, int line)
    295	__releases(&proc->inner_lock)
    296{
    297	binder_debug(BINDER_DEBUG_SPINLOCKS,
    298		     "%s: line=%d\n", __func__, line);
    299	spin_unlock(&proc->inner_lock);
    300}
    301
    302/**
    303 * binder_node_lock() - Acquire spinlock for given binder_node
    304 * @node:         struct binder_node to acquire
    305 *
    306 * Acquires node->lock. Used to protect binder_node fields
    307 */
    308#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
    309static void
    310_binder_node_lock(struct binder_node *node, int line)
    311	__acquires(&node->lock)
    312{
    313	binder_debug(BINDER_DEBUG_SPINLOCKS,
    314		     "%s: line=%d\n", __func__, line);
    315	spin_lock(&node->lock);
    316}
    317
    318/**
     319 * binder_node_unlock() - Release spinlock for given binder_node
     320 * @node:         struct binder_node being released
    321 *
    322 * Release lock acquired via binder_node_lock()
    323 */
    324#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
    325static void
    326_binder_node_unlock(struct binder_node *node, int line)
    327	__releases(&node->lock)
    328{
    329	binder_debug(BINDER_DEBUG_SPINLOCKS,
    330		     "%s: line=%d\n", __func__, line);
    331	spin_unlock(&node->lock);
    332}
    333
    334/**
    335 * binder_node_inner_lock() - Acquire node and inner locks
    336 * @node:         struct binder_node to acquire
    337 *
     338 * Acquires node->lock. If node->proc is non-NULL, also acquires
     339 * proc->inner_lock. Used to protect binder_node fields
    340 */
    341#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
    342static void
    343_binder_node_inner_lock(struct binder_node *node, int line)
    344	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
    345{
    346	binder_debug(BINDER_DEBUG_SPINLOCKS,
    347		     "%s: line=%d\n", __func__, line);
    348	spin_lock(&node->lock);
    349	if (node->proc)
    350		binder_inner_proc_lock(node->proc);
    351	else
    352		/* annotation for sparse */
    353		__acquire(&node->proc->inner_lock);
    354}
    355
    356/**
     357 * binder_node_inner_unlock() - Release node and inner locks
     358 * @node:         struct binder_node being released
     359 *
     360 * Release locks acquired via binder_node_inner_lock()
    361 */
    362#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
    363static void
    364_binder_node_inner_unlock(struct binder_node *node, int line)
    365	__releases(&node->lock) __releases(&node->proc->inner_lock)
    366{
    367	struct binder_proc *proc = node->proc;
    368
    369	binder_debug(BINDER_DEBUG_SPINLOCKS,
    370		     "%s: line=%d\n", __func__, line);
    371	if (proc)
    372		binder_inner_proc_unlock(proc);
    373	else
    374		/* annotation for sparse */
    375		__release(&node->proc->inner_lock);
    376	spin_unlock(&node->lock);
    377}
    378
    379static bool binder_worklist_empty_ilocked(struct list_head *list)
    380{
    381	return list_empty(list);
    382}
    383
    384/**
    385 * binder_worklist_empty() - Check if no items on the work list
    386 * @proc:       binder_proc associated with list
    387 * @list:	list to check
    388 *
    389 * Return: true if there are no items on list, else false
    390 */
    391static bool binder_worklist_empty(struct binder_proc *proc,
    392				  struct list_head *list)
    393{
    394	bool ret;
    395
    396	binder_inner_proc_lock(proc);
    397	ret = binder_worklist_empty_ilocked(list);
    398	binder_inner_proc_unlock(proc);
    399	return ret;
    400}
    401
    402/**
    403 * binder_enqueue_work_ilocked() - Add an item to the work list
    404 * @work:         struct binder_work to add to list
    405 * @target_list:  list to add work to
    406 *
    407 * Adds the work to the specified list. Asserts that work
    408 * is not already on a list.
    409 *
    410 * Requires the proc->inner_lock to be held.
    411 */
    412static void
    413binder_enqueue_work_ilocked(struct binder_work *work,
    414			   struct list_head *target_list)
    415{
    416	BUG_ON(target_list == NULL);
    417	BUG_ON(work->entry.next && !list_empty(&work->entry));
    418	list_add_tail(&work->entry, target_list);
    419}
    420
    421/**
    422 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
    423 * @thread:       thread to queue work to
    424 * @work:         struct binder_work to add to list
    425 *
    426 * Adds the work to the todo list of the thread. Doesn't set the process_todo
    427 * flag, which means that (if it wasn't already set) the thread will go to
    428 * sleep without handling this work when it calls read.
    429 *
    430 * Requires the proc->inner_lock to be held.
    431 */
    432static void
    433binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
    434					    struct binder_work *work)
    435{
    436	WARN_ON(!list_empty(&thread->waiting_thread_node));
    437	binder_enqueue_work_ilocked(work, &thread->todo);
    438}
    439
    440/**
    441 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
    442 * @thread:       thread to queue work to
    443 * @work:         struct binder_work to add to list
    444 *
    445 * Adds the work to the todo list of the thread, and enables processing
    446 * of the todo queue.
    447 *
    448 * Requires the proc->inner_lock to be held.
    449 */
    450static void
    451binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
    452				   struct binder_work *work)
    453{
    454	WARN_ON(!list_empty(&thread->waiting_thread_node));
    455	binder_enqueue_work_ilocked(work, &thread->todo);
    456	thread->process_todo = true;
    457}
    458
    459/**
    460 * binder_enqueue_thread_work() - Add an item to the thread work list
    461 * @thread:       thread to queue work to
    462 * @work:         struct binder_work to add to list
    463 *
    464 * Adds the work to the todo list of the thread, and enables processing
    465 * of the todo queue.
    466 */
    467static void
    468binder_enqueue_thread_work(struct binder_thread *thread,
    469			   struct binder_work *work)
    470{
    471	binder_inner_proc_lock(thread->proc);
    472	binder_enqueue_thread_work_ilocked(thread, work);
    473	binder_inner_proc_unlock(thread->proc);
    474}
    475
    476static void
    477binder_dequeue_work_ilocked(struct binder_work *work)
    478{
    479	list_del_init(&work->entry);
    480}
    481
    482/**
    483 * binder_dequeue_work() - Removes an item from the work list
    484 * @proc:         binder_proc associated with list
    485 * @work:         struct binder_work to remove from list
    486 *
    487 * Removes the specified work item from whatever list it is on.
    488 * Can safely be called if work is not on any list.
    489 */
    490static void
    491binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
    492{
    493	binder_inner_proc_lock(proc);
    494	binder_dequeue_work_ilocked(work);
    495	binder_inner_proc_unlock(proc);
    496}
    497
    498static struct binder_work *binder_dequeue_work_head_ilocked(
    499					struct list_head *list)
    500{
    501	struct binder_work *w;
    502
    503	w = list_first_entry_or_null(list, struct binder_work, entry);
    504	if (w)
    505		list_del_init(&w->entry);
    506	return w;
    507}
    508
    509static void
    510binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
    511static void binder_free_thread(struct binder_thread *thread);
    512static void binder_free_proc(struct binder_proc *proc);
    513static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
    514
    515static bool binder_has_work_ilocked(struct binder_thread *thread,
    516				    bool do_proc_work)
    517{
    518	return thread->process_todo ||
    519		thread->looper_need_return ||
    520		(do_proc_work &&
    521		 !binder_worklist_empty_ilocked(&thread->proc->todo));
    522}
    523
    524static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
    525{
    526	bool has_work;
    527
    528	binder_inner_proc_lock(thread->proc);
    529	has_work = binder_has_work_ilocked(thread, do_proc_work);
    530	binder_inner_proc_unlock(thread->proc);
    531
    532	return has_work;
    533}
    534
    535static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
    536{
    537	return !thread->transaction_stack &&
    538		binder_worklist_empty_ilocked(&thread->todo) &&
    539		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
    540				   BINDER_LOOPER_STATE_REGISTERED));
    541}
    542
    543static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
    544					       bool sync)
    545{
    546	struct rb_node *n;
    547	struct binder_thread *thread;
    548
    549	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
    550		thread = rb_entry(n, struct binder_thread, rb_node);
    551		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
    552		    binder_available_for_proc_work_ilocked(thread)) {
    553			if (sync)
    554				wake_up_interruptible_sync(&thread->wait);
    555			else
    556				wake_up_interruptible(&thread->wait);
    557		}
    558	}
    559}
    560
    561/**
    562 * binder_select_thread_ilocked() - selects a thread for doing proc work.
    563 * @proc:	process to select a thread from
    564 *
    565 * Note that calling this function moves the thread off the waiting_threads
    566 * list, so it can only be woken up by the caller of this function, or a
    567 * signal. Therefore, callers *should* always wake up the thread this function
    568 * returns.
    569 *
    570 * Return:	If there's a thread currently waiting for process work,
    571 *		returns that thread. Otherwise returns NULL.
    572 */
    573static struct binder_thread *
    574binder_select_thread_ilocked(struct binder_proc *proc)
    575{
    576	struct binder_thread *thread;
    577
    578	assert_spin_locked(&proc->inner_lock);
    579	thread = list_first_entry_or_null(&proc->waiting_threads,
    580					  struct binder_thread,
    581					  waiting_thread_node);
    582
    583	if (thread)
    584		list_del_init(&thread->waiting_thread_node);
    585
    586	return thread;
    587}
    588
    589/**
    590 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
    591 * @proc:	process to wake up a thread in
    592 * @thread:	specific thread to wake-up (may be NULL)
    593 * @sync:	whether to do a synchronous wake-up
    594 *
    595 * This function wakes up a thread in the @proc process.
    596 * The caller may provide a specific thread to wake-up in
    597 * the @thread parameter. If @thread is NULL, this function
    598 * will wake up threads that have called poll().
    599 *
    600 * Note that for this function to work as expected, callers
     601 * should first call binder_select_thread_ilocked() to find a thread
    602 * to handle the work (if they don't have a thread already),
    603 * and pass the result into the @thread parameter.
    604 */
    605static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
    606					 struct binder_thread *thread,
    607					 bool sync)
    608{
    609	assert_spin_locked(&proc->inner_lock);
    610
    611	if (thread) {
    612		if (sync)
    613			wake_up_interruptible_sync(&thread->wait);
    614		else
    615			wake_up_interruptible(&thread->wait);
    616		return;
    617	}
    618
    619	/* Didn't find a thread waiting for proc work; this can happen
    620	 * in two scenarios:
    621	 * 1. All threads are busy handling transactions
    622	 *    In that case, one of those threads should call back into
    623	 *    the kernel driver soon and pick up this work.
    624	 * 2. Threads are using the (e)poll interface, in which case
    625	 *    they may be blocked on the waitqueue without having been
    626	 *    added to waiting_threads. For this case, we just iterate
    627	 *    over all threads not handling transaction work, and
    628	 *    wake them all up. We wake all because we don't know whether
    629	 *    a thread that called into (e)poll is handling non-binder
    630	 *    work currently.
    631	 */
    632	binder_wakeup_poll_threads_ilocked(proc, sync);
    633}
    634
    635static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
    636{
    637	struct binder_thread *thread = binder_select_thread_ilocked(proc);
    638
    639	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
    640}
    641
    642static void binder_set_nice(long nice)
    643{
    644	long min_nice;
    645
    646	if (can_nice(current, nice)) {
    647		set_user_nice(current, nice);
    648		return;
    649	}
    650	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
    651	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
    652		     "%d: nice value %ld not allowed use %ld instead\n",
    653		      current->pid, nice, min_nice);
    654	set_user_nice(current, min_nice);
    655	if (min_nice <= MAX_NICE)
    656		return;
    657	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
    658}
    659
    660static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
    661						   binder_uintptr_t ptr)
    662{
    663	struct rb_node *n = proc->nodes.rb_node;
    664	struct binder_node *node;
    665
    666	assert_spin_locked(&proc->inner_lock);
    667
    668	while (n) {
    669		node = rb_entry(n, struct binder_node, rb_node);
    670
    671		if (ptr < node->ptr)
    672			n = n->rb_left;
    673		else if (ptr > node->ptr)
    674			n = n->rb_right;
    675		else {
    676			/*
    677			 * take an implicit weak reference
    678			 * to ensure node stays alive until
    679			 * call to binder_put_node()
    680			 */
    681			binder_inc_node_tmpref_ilocked(node);
    682			return node;
    683		}
    684	}
    685	return NULL;
    686}
    687
    688static struct binder_node *binder_get_node(struct binder_proc *proc,
    689					   binder_uintptr_t ptr)
    690{
    691	struct binder_node *node;
    692
    693	binder_inner_proc_lock(proc);
    694	node = binder_get_node_ilocked(proc, ptr);
    695	binder_inner_proc_unlock(proc);
    696	return node;
    697}
    698
    699static struct binder_node *binder_init_node_ilocked(
    700						struct binder_proc *proc,
    701						struct binder_node *new_node,
    702						struct flat_binder_object *fp)
    703{
    704	struct rb_node **p = &proc->nodes.rb_node;
    705	struct rb_node *parent = NULL;
    706	struct binder_node *node;
    707	binder_uintptr_t ptr = fp ? fp->binder : 0;
    708	binder_uintptr_t cookie = fp ? fp->cookie : 0;
    709	__u32 flags = fp ? fp->flags : 0;
    710
    711	assert_spin_locked(&proc->inner_lock);
    712
    713	while (*p) {
    714
    715		parent = *p;
    716		node = rb_entry(parent, struct binder_node, rb_node);
    717
    718		if (ptr < node->ptr)
    719			p = &(*p)->rb_left;
    720		else if (ptr > node->ptr)
    721			p = &(*p)->rb_right;
    722		else {
    723			/*
    724			 * A matching node is already in
    725			 * the rb tree. Abandon the init
    726			 * and return it.
    727			 */
    728			binder_inc_node_tmpref_ilocked(node);
    729			return node;
    730		}
    731	}
    732	node = new_node;
    733	binder_stats_created(BINDER_STAT_NODE);
    734	node->tmp_refs++;
    735	rb_link_node(&node->rb_node, parent, p);
    736	rb_insert_color(&node->rb_node, &proc->nodes);
    737	node->debug_id = atomic_inc_return(&binder_last_id);
    738	node->proc = proc;
    739	node->ptr = ptr;
    740	node->cookie = cookie;
    741	node->work.type = BINDER_WORK_NODE;
    742	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
    743	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
    744	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
    745	spin_lock_init(&node->lock);
    746	INIT_LIST_HEAD(&node->work.entry);
    747	INIT_LIST_HEAD(&node->async_todo);
    748	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
    749		     "%d:%d node %d u%016llx c%016llx created\n",
    750		     proc->pid, current->pid, node->debug_id,
    751		     (u64)node->ptr, (u64)node->cookie);
    752
    753	return node;
    754}
    755
    756static struct binder_node *binder_new_node(struct binder_proc *proc,
    757					   struct flat_binder_object *fp)
    758{
    759	struct binder_node *node;
    760	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
    761
    762	if (!new_node)
    763		return NULL;
    764	binder_inner_proc_lock(proc);
    765	node = binder_init_node_ilocked(proc, new_node, fp);
    766	binder_inner_proc_unlock(proc);
    767	if (node != new_node)
    768		/*
    769		 * The node was already added by another thread
    770		 */
    771		kfree(new_node);
    772
    773	return node;
    774}
    775
    776static void binder_free_node(struct binder_node *node)
    777{
    778	kfree(node);
    779	binder_stats_deleted(BINDER_STAT_NODE);
    780}
    781
    782static int binder_inc_node_nilocked(struct binder_node *node, int strong,
    783				    int internal,
    784				    struct list_head *target_list)
    785{
    786	struct binder_proc *proc = node->proc;
    787
    788	assert_spin_locked(&node->lock);
    789	if (proc)
    790		assert_spin_locked(&proc->inner_lock);
    791	if (strong) {
    792		if (internal) {
    793			if (target_list == NULL &&
    794			    node->internal_strong_refs == 0 &&
    795			    !(node->proc &&
    796			      node == node->proc->context->binder_context_mgr_node &&
    797			      node->has_strong_ref)) {
    798				pr_err("invalid inc strong node for %d\n",
    799					node->debug_id);
    800				return -EINVAL;
    801			}
    802			node->internal_strong_refs++;
    803		} else
    804			node->local_strong_refs++;
    805		if (!node->has_strong_ref && target_list) {
    806			struct binder_thread *thread = container_of(target_list,
    807						    struct binder_thread, todo);
    808			binder_dequeue_work_ilocked(&node->work);
    809			BUG_ON(&thread->todo != target_list);
    810			binder_enqueue_deferred_thread_work_ilocked(thread,
    811								   &node->work);
    812		}
    813	} else {
    814		if (!internal)
    815			node->local_weak_refs++;
    816		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
    817			if (target_list == NULL) {
    818				pr_err("invalid inc weak node for %d\n",
    819					node->debug_id);
    820				return -EINVAL;
    821			}
    822			/*
    823			 * See comment above
    824			 */
    825			binder_enqueue_work_ilocked(&node->work, target_list);
    826		}
    827	}
    828	return 0;
    829}
    830
    831static int binder_inc_node(struct binder_node *node, int strong, int internal,
    832			   struct list_head *target_list)
    833{
    834	int ret;
    835
    836	binder_node_inner_lock(node);
    837	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
    838	binder_node_inner_unlock(node);
    839
    840	return ret;
    841}
    842
    843static bool binder_dec_node_nilocked(struct binder_node *node,
    844				     int strong, int internal)
    845{
    846	struct binder_proc *proc = node->proc;
    847
    848	assert_spin_locked(&node->lock);
    849	if (proc)
    850		assert_spin_locked(&proc->inner_lock);
    851	if (strong) {
    852		if (internal)
    853			node->internal_strong_refs--;
    854		else
    855			node->local_strong_refs--;
    856		if (node->local_strong_refs || node->internal_strong_refs)
    857			return false;
    858	} else {
    859		if (!internal)
    860			node->local_weak_refs--;
    861		if (node->local_weak_refs || node->tmp_refs ||
    862				!hlist_empty(&node->refs))
    863			return false;
    864	}
    865
    866	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
    867		if (list_empty(&node->work.entry)) {
    868			binder_enqueue_work_ilocked(&node->work, &proc->todo);
    869			binder_wakeup_proc_ilocked(proc);
    870		}
    871	} else {
    872		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
    873		    !node->local_weak_refs && !node->tmp_refs) {
    874			if (proc) {
    875				binder_dequeue_work_ilocked(&node->work);
    876				rb_erase(&node->rb_node, &proc->nodes);
    877				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
    878					     "refless node %d deleted\n",
    879					     node->debug_id);
    880			} else {
    881				BUG_ON(!list_empty(&node->work.entry));
    882				spin_lock(&binder_dead_nodes_lock);
    883				/*
    884				 * tmp_refs could have changed so
    885				 * check it again
    886				 */
    887				if (node->tmp_refs) {
    888					spin_unlock(&binder_dead_nodes_lock);
    889					return false;
    890				}
    891				hlist_del(&node->dead_node);
    892				spin_unlock(&binder_dead_nodes_lock);
    893				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
    894					     "dead node %d deleted\n",
    895					     node->debug_id);
    896			}
    897			return true;
    898		}
    899	}
    900	return false;
    901}
    902
    903static void binder_dec_node(struct binder_node *node, int strong, int internal)
    904{
    905	bool free_node;
    906
    907	binder_node_inner_lock(node);
    908	free_node = binder_dec_node_nilocked(node, strong, internal);
    909	binder_node_inner_unlock(node);
    910	if (free_node)
    911		binder_free_node(node);
    912}
    913
    914static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
    915{
    916	/*
    917	 * No call to binder_inc_node() is needed since we
    918	 * don't need to inform userspace of any changes to
    919	 * tmp_refs
    920	 */
    921	node->tmp_refs++;
    922}
    923
    924/**
    925 * binder_inc_node_tmpref() - take a temporary reference on node
    926 * @node:	node to reference
    927 *
    928 * Take reference on node to prevent the node from being freed
    929 * while referenced only by a local variable. The inner lock is
    930 * needed to serialize with the node work on the queue (which
    931 * isn't needed after the node is dead). If the node is dead
    932 * (node->proc is NULL), use binder_dead_nodes_lock to protect
    933 * node->tmp_refs against dead-node-only cases where the node
     934 * lock cannot be acquired (e.g. traversing the dead node list to
    935 * print nodes)
    936 */
    937static void binder_inc_node_tmpref(struct binder_node *node)
    938{
    939	binder_node_lock(node);
    940	if (node->proc)
    941		binder_inner_proc_lock(node->proc);
    942	else
    943		spin_lock(&binder_dead_nodes_lock);
    944	binder_inc_node_tmpref_ilocked(node);
    945	if (node->proc)
    946		binder_inner_proc_unlock(node->proc);
    947	else
    948		spin_unlock(&binder_dead_nodes_lock);
    949	binder_node_unlock(node);
    950}
    951
    952/**
    953 * binder_dec_node_tmpref() - remove a temporary reference on node
    954 * @node:	node to reference
    955 *
    956 * Release temporary reference on node taken via binder_inc_node_tmpref()
    957 */
    958static void binder_dec_node_tmpref(struct binder_node *node)
    959{
    960	bool free_node;
    961
    962	binder_node_inner_lock(node);
    963	if (!node->proc)
    964		spin_lock(&binder_dead_nodes_lock);
    965	else
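		/* annotation for sparse */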
    966		__acquire(&binder_dead_nodes_lock);
    967	node->tmp_refs--;
    968	BUG_ON(node->tmp_refs < 0);
    969	if (!node->proc)
    970		spin_unlock(&binder_dead_nodes_lock);
    971	else
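		/* annotation for sparse */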
    972		__release(&binder_dead_nodes_lock);
    973	/*
    974	 * Call binder_dec_node() to check if all refcounts are 0
    975	 * and cleanup is needed. Calling with strong=0 and internal=1
    976	 * causes no actual reference to be released in binder_dec_node().
    977	 * If that changes, a change is needed here too.
    978	 */
    979	free_node = binder_dec_node_nilocked(node, 0, 1);
    980	binder_node_inner_unlock(node);
    981	if (free_node)
    982		binder_free_node(node);
    983}
    984
    985static void binder_put_node(struct binder_node *node)
    986{
    987	binder_dec_node_tmpref(node);
    988}
    989
    990static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
    991						 u32 desc, bool need_strong_ref)
    992{
    993	struct rb_node *n = proc->refs_by_desc.rb_node;
    994	struct binder_ref *ref;
    995
    996	while (n) {
    997		ref = rb_entry(n, struct binder_ref, rb_node_desc);
    998
    999		if (desc < ref->data.desc) {
   1000			n = n->rb_left;
   1001		} else if (desc > ref->data.desc) {
   1002			n = n->rb_right;
   1003		} else if (need_strong_ref && !ref->data.strong) {
   1004			binder_user_error("tried to use weak ref as strong ref\n");
   1005			return NULL;
   1006		} else {
   1007			return ref;
   1008		}
   1009	}
   1010	return NULL;
   1011}
   1012
   1013/**
   1014 * binder_get_ref_for_node_olocked() - get the ref associated with given node
   1015 * @proc:	binder_proc that owns the ref
   1016 * @node:	binder_node of target
   1017 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
   1018 *
   1019 * Look up the ref for the given node and return it if it exists
   1020 *
   1021 * If it doesn't exist and the caller provides a newly allocated
   1022 * ref, initialize the fields of the newly allocated ref and insert
   1023 * into the given proc rb_trees and node refs list.
   1024 *
   1025 * Return:	the ref for node. It is possible that another thread
   1026 *		allocated/initialized the ref first in which case the
   1027 *		returned ref would be different than the passed-in
   1028 *		new_ref. new_ref must be kfree'd by the caller in
   1029 *		this case.
   1030 */
   1031static struct binder_ref *binder_get_ref_for_node_olocked(
   1032					struct binder_proc *proc,
   1033					struct binder_node *node,
   1034					struct binder_ref *new_ref)
   1035{
   1036	struct binder_context *context = proc->context;
   1037	struct rb_node **p = &proc->refs_by_node.rb_node;
   1038	struct rb_node *parent = NULL;
   1039	struct binder_ref *ref;
   1040	struct rb_node *n;
   1041
   1042	while (*p) {
   1043		parent = *p;
   1044		ref = rb_entry(parent, struct binder_ref, rb_node_node);
   1045
   1046		if (node < ref->node)
   1047			p = &(*p)->rb_left;
   1048		else if (node > ref->node)
   1049			p = &(*p)->rb_right;
   1050		else
   1051			return ref;
   1052	}
   1053	if (!new_ref)
   1054		return NULL;
   1055
   1056	binder_stats_created(BINDER_STAT_REF);
   1057	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
   1058	new_ref->proc = proc;
   1059	new_ref->node = node;
   1060	rb_link_node(&new_ref->rb_node_node, parent, p);
   1061	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
   1062
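	/* desc 0 is reserved for the context manager node; every other
	 * ref gets the lowest unused descriptor, found by walking the
	 * desc-ordered tree until a gap appears */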
   1063	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
   1064	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
   1065		ref = rb_entry(n, struct binder_ref, rb_node_desc);
   1066		if (ref->data.desc > new_ref->data.desc)
   1067			break;
   1068		new_ref->data.desc = ref->data.desc + 1;
   1069	}
   1070
   1071	p = &proc->refs_by_desc.rb_node;
   1072	while (*p) {
   1073		parent = *p;
   1074		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
   1075
   1076		if (new_ref->data.desc < ref->data.desc)
   1077			p = &(*p)->rb_left;
   1078		else if (new_ref->data.desc > ref->data.desc)
   1079			p = &(*p)->rb_right;
   1080		else
   1081			BUG();
   1082	}
   1083	rb_link_node(&new_ref->rb_node_desc, parent, p);
   1084	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
   1085
   1086	binder_node_lock(node);
   1087	hlist_add_head(&new_ref->node_entry, &node->refs);
   1088
   1089	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
   1090		     "%d new ref %d desc %d for node %d\n",
   1091		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
   1092		      node->debug_id);
   1093	binder_node_unlock(node);
   1094	return new_ref;
   1095}
   1096
   1097static void binder_cleanup_ref_olocked(struct binder_ref *ref)
   1098{
   1099	bool delete_node = false;
   1100
   1101	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
   1102		     "%d delete ref %d desc %d for node %d\n",
   1103		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
   1104		      ref->node->debug_id);
   1105
   1106	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
   1107	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
   1108
   1109	binder_node_inner_lock(ref->node);
   1110	if (ref->data.strong)
   1111		binder_dec_node_nilocked(ref->node, 1, 1);
   1112
   1113	hlist_del(&ref->node_entry);
   1114	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
   1115	binder_node_inner_unlock(ref->node);
   1116	/*
   1117	 * Clear ref->node unless we want the caller to free the node
   1118	 */
   1119	if (!delete_node) {
   1120		/*
   1121		 * The caller uses ref->node to determine
   1122		 * whether the node needs to be freed. Clear
   1123		 * it since the node is still alive.
   1124		 */
   1125		ref->node = NULL;
   1126	}
   1127
   1128	if (ref->death) {
   1129		binder_debug(BINDER_DEBUG_DEAD_BINDER,
   1130			     "%d delete ref %d desc %d has death notification\n",
   1131			      ref->proc->pid, ref->data.debug_id,
   1132			      ref->data.desc);
   1133		binder_dequeue_work(ref->proc, &ref->death->work);
   1134		binder_stats_deleted(BINDER_STAT_DEATH);
   1135	}
   1136	binder_stats_deleted(BINDER_STAT_REF);
   1137}
   1138
   1139/**
   1140 * binder_inc_ref_olocked() - increment the ref for given handle
   1141 * @ref:         ref to be incremented
   1142 * @strong:      if true, strong increment, else weak
   1143 * @target_list: list to queue node work on
   1144 *
   1145 * Increment the ref. @ref->proc->outer_lock must be held on entry
   1146 *
   1147 * Return: 0, if successful, else errno
   1148 */
   1149static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
   1150				  struct list_head *target_list)
   1151{
   1152	int ret;
   1153
   1154	if (strong) {
   1155		if (ref->data.strong == 0) {
   1156			ret = binder_inc_node(ref->node, 1, 1, target_list);
   1157			if (ret)
   1158				return ret;
   1159		}
   1160		ref->data.strong++;
   1161	} else {
   1162		if (ref->data.weak == 0) {
   1163			ret = binder_inc_node(ref->node, 0, 1, target_list);
   1164			if (ret)
   1165				return ret;
   1166		}
   1167		ref->data.weak++;
   1168	}
   1169	return 0;
   1170}
   1171
   1172/**
    1173 * binder_dec_ref_olocked() - dec the ref for given handle
    1174 * @ref:	ref to be decremented
    1175 * @strong:	if true, strong decrement, else weak
    1176 *
    1177 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
   1178 *
   1179 * Return: true if ref is cleaned up and ready to be freed
   1180 */
   1181static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
   1182{
   1183	if (strong) {
   1184		if (ref->data.strong == 0) {
   1185			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
   1186					  ref->proc->pid, ref->data.debug_id,
   1187					  ref->data.desc, ref->data.strong,
   1188					  ref->data.weak);
   1189			return false;
   1190		}
   1191		ref->data.strong--;
   1192		if (ref->data.strong == 0)
   1193			binder_dec_node(ref->node, strong, 1);
   1194	} else {
   1195		if (ref->data.weak == 0) {
   1196			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
   1197					  ref->proc->pid, ref->data.debug_id,
   1198					  ref->data.desc, ref->data.strong,
   1199					  ref->data.weak);
   1200			return false;
   1201		}
   1202		ref->data.weak--;
   1203	}
   1204	if (ref->data.strong == 0 && ref->data.weak == 0) {
   1205		binder_cleanup_ref_olocked(ref);
   1206		return true;
   1207	}
   1208	return false;
   1209}
   1210
   1211/**
   1212 * binder_get_node_from_ref() - get the node from the given proc/desc
   1213 * @proc:	proc containing the ref
   1214 * @desc:	the handle associated with the ref
   1215 * @need_strong_ref: if true, only return node if ref is strong
   1216 * @rdata:	the id/refcount data for the ref
   1217 *
   1218 * Given a proc and ref handle, return the associated binder_node
   1219 *
    1220 * Return: a binder_node, or NULL if not found or if a strong ref was required but only a weak one exists
   1221 */
   1222static struct binder_node *binder_get_node_from_ref(
   1223		struct binder_proc *proc,
   1224		u32 desc, bool need_strong_ref,
   1225		struct binder_ref_data *rdata)
   1226{
   1227	struct binder_node *node;
   1228	struct binder_ref *ref;
   1229
   1230	binder_proc_lock(proc);
   1231	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
   1232	if (!ref)
   1233		goto err_no_ref;
   1234	node = ref->node;
   1235	/*
   1236	 * Take an implicit reference on the node to ensure
   1237	 * it stays alive until the call to binder_put_node()
   1238	 */
   1239	binder_inc_node_tmpref(node);
   1240	if (rdata)
   1241		*rdata = ref->data;
   1242	binder_proc_unlock(proc);
   1243
   1244	return node;
   1245
   1246err_no_ref:
   1247	binder_proc_unlock(proc);
   1248	return NULL;
   1249}
   1250
   1251/**
   1252 * binder_free_ref() - free the binder_ref
   1253 * @ref:	ref to free
   1254 *
   1255 * Free the binder_ref. Free the binder_node indicated by ref->node
   1256 * (if non-NULL) and the binder_ref_death indicated by ref->death.
   1257 */
   1258static void binder_free_ref(struct binder_ref *ref)
   1259{
   1260	if (ref->node)
   1261		binder_free_node(ref->node);
   1262	kfree(ref->death);
   1263	kfree(ref);
   1264}
   1265
   1266/**
   1267 * binder_update_ref_for_handle() - inc/dec the ref for given handle
   1268 * @proc:	proc containing the ref
   1269 * @desc:	the handle associated with the ref
   1270 * @increment:	true=inc reference, false=dec reference
   1271 * @strong:	true=strong reference, false=weak reference
   1272 * @rdata:	the id/refcount data for the ref
   1273 *
   1274 * Given a proc and ref handle, increment or decrement the ref
   1275 * according to "increment" arg.
   1276 *
   1277 * Return: 0 if successful, else errno
   1278 */
   1279static int binder_update_ref_for_handle(struct binder_proc *proc,
   1280		uint32_t desc, bool increment, bool strong,
   1281		struct binder_ref_data *rdata)
   1282{
   1283	int ret = 0;
   1284	struct binder_ref *ref;
   1285	bool delete_ref = false;
   1286
   1287	binder_proc_lock(proc);
   1288	ref = binder_get_ref_olocked(proc, desc, strong);
   1289	if (!ref) {
   1290		ret = -EINVAL;
   1291		goto err_no_ref;
   1292	}
   1293	if (increment)
   1294		ret = binder_inc_ref_olocked(ref, strong, NULL);
   1295	else
   1296		delete_ref = binder_dec_ref_olocked(ref, strong);
   1297
   1298	if (rdata)
   1299		*rdata = ref->data;
   1300	binder_proc_unlock(proc);
   1301
   1302	if (delete_ref)
   1303		binder_free_ref(ref);
   1304	return ret;
   1305
   1306err_no_ref:
   1307	binder_proc_unlock(proc);
   1308	return ret;
   1309}
   1310
   1311/**
   1312 * binder_dec_ref_for_handle() - dec the ref for given handle
   1313 * @proc:	proc containing the ref
   1314 * @desc:	the handle associated with the ref
   1315 * @strong:	true=strong reference, false=weak reference
   1316 * @rdata:	the id/refcount data for the ref
   1317 *
   1318 * Just calls binder_update_ref_for_handle() to decrement the ref.
   1319 *
   1320 * Return: 0 if successful, else errno
   1321 */
   1322static int binder_dec_ref_for_handle(struct binder_proc *proc,
   1323		uint32_t desc, bool strong, struct binder_ref_data *rdata)
   1324{
   1325	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
   1326}
   1327
   1328
   1329/**
   1330 * binder_inc_ref_for_node() - increment the ref for given proc/node
   1331 * @proc:	 proc containing the ref
   1332 * @node:	 target node
   1333 * @strong:	 true=strong reference, false=weak reference
   1334 * @target_list: worklist to use if node is incremented
   1335 * @rdata:	 the id/refcount data for the ref
   1336 *
   1337 * Given a proc and node, increment the ref. Create the ref if it
   1338 * doesn't already exist
   1339 *
   1340 * Return: 0 if successful, else errno
   1341 */
   1342static int binder_inc_ref_for_node(struct binder_proc *proc,
   1343			struct binder_node *node,
   1344			bool strong,
   1345			struct list_head *target_list,
   1346			struct binder_ref_data *rdata)
   1347{
   1348	struct binder_ref *ref;
   1349	struct binder_ref *new_ref = NULL;
   1350	int ret = 0;
   1351
   1352	binder_proc_lock(proc);
   1353	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
   1354	if (!ref) {
   1355		binder_proc_unlock(proc);
   1356		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
   1357		if (!new_ref)
   1358			return -ENOMEM;
   1359		binder_proc_lock(proc);
   1360		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
   1361	}
   1362	ret = binder_inc_ref_olocked(ref, strong, target_list);
   1363	*rdata = ref->data;
   1364	binder_proc_unlock(proc);
   1365	if (new_ref && ref != new_ref)
   1366		/*
   1367		 * Another thread created the ref first so
   1368		 * free the one we allocated
   1369		 */
   1370		kfree(new_ref);
   1371	return ret;
   1372}
   1373
   1374static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
   1375					   struct binder_transaction *t)
   1376{
   1377	BUG_ON(!target_thread);
   1378	assert_spin_locked(&target_thread->proc->inner_lock);
   1379	BUG_ON(target_thread->transaction_stack != t);
   1380	BUG_ON(target_thread->transaction_stack->from != target_thread);
   1381	target_thread->transaction_stack =
   1382		target_thread->transaction_stack->from_parent;
   1383	t->from = NULL;
   1384}
   1385
   1386/**
   1387 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
   1388 * @thread:	thread to decrement
   1389 *
   1390 * A thread needs to be kept alive while being used to create or
   1391 * handle a transaction. binder_get_txn_from() is used to safely
   1392 * extract t->from from a binder_transaction and keep the thread
   1393 * indicated by t->from from being freed. When done with that
   1394 * binder_thread, this function is called to decrement the
   1395 * tmp_ref and free if appropriate (thread has been released
   1396 * and no transaction being processed by the driver)
   1397 */
   1398static void binder_thread_dec_tmpref(struct binder_thread *thread)
   1399{
   1400	/*
    1401	 * the atomic alone is enough to protect the counter while
    1402	 * it cannot reach zero or thread->is_dead is false
   1403	 */
   1404	binder_inner_proc_lock(thread->proc);
   1405	atomic_dec(&thread->tmp_ref);
   1406	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
   1407		binder_inner_proc_unlock(thread->proc);
   1408		binder_free_thread(thread);
   1409		return;
   1410	}
   1411	binder_inner_proc_unlock(thread->proc);
   1412}
   1413
   1414/**
   1415 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
   1416 * @proc:	proc to decrement
   1417 *
   1418 * A binder_proc needs to be kept alive while being used to create or
   1419 * handle a transaction. proc->tmp_ref is incremented when
    1420 * creating a new transaction or while the binder_proc is in use
    1421 * by threads that are being released. When done with the binder_proc,
    1422 * this function is called to decrement the counter and free the
    1423 * proc if appropriate (proc has been released, all threads have
    1424 * been released and it is not currently in use to process a transaction).
   1425 */
   1426static void binder_proc_dec_tmpref(struct binder_proc *proc)
   1427{
   1428	binder_inner_proc_lock(proc);
   1429	proc->tmp_ref--;
   1430	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
   1431			!proc->tmp_ref) {
   1432		binder_inner_proc_unlock(proc);
   1433		binder_free_proc(proc);
   1434		return;
   1435	}
   1436	binder_inner_proc_unlock(proc);
   1437}
   1438
   1439/**
   1440 * binder_get_txn_from() - safely extract the "from" thread in transaction
   1441 * @t:	binder transaction for t->from
   1442 *
   1443 * Atomically return the "from" thread and increment the tmp_ref
   1444 * count for the thread to ensure it stays alive until
   1445 * binder_thread_dec_tmpref() is called.
   1446 *
   1447 * Return: the value of t->from
   1448 */
   1449static struct binder_thread *binder_get_txn_from(
   1450		struct binder_transaction *t)
   1451{
   1452	struct binder_thread *from;
   1453
   1454	spin_lock(&t->lock);
   1455	from = t->from;
   1456	if (from)
   1457		atomic_inc(&from->tmp_ref);
   1458	spin_unlock(&t->lock);
   1459	return from;
   1460}
   1461
   1462/**
   1463 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
   1464 * @t:	binder transaction for t->from
   1465 *
   1466 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
   1467 * to guarantee that the thread cannot be released while operating on it.
   1468 * The caller must call binder_inner_proc_unlock() to release the inner lock
    1469 * as well as call binder_thread_dec_tmpref() to release the reference.
   1470 *
   1471 * Return: the value of t->from
   1472 */
   1473static struct binder_thread *binder_get_txn_from_and_acq_inner(
   1474		struct binder_transaction *t)
   1475	__acquires(&t->from->proc->inner_lock)
   1476{
   1477	struct binder_thread *from;
   1478
   1479	from = binder_get_txn_from(t);
   1480	if (!from) {
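		/* annotation for sparse */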
   1481		__acquire(&from->proc->inner_lock);
   1482		return NULL;
   1483	}
   1484	binder_inner_proc_lock(from->proc);
   1485	if (t->from) {
   1486		BUG_ON(from != t->from);
   1487		return from;
   1488	}
   1489	binder_inner_proc_unlock(from->proc);
   1490	__acquire(&from->proc->inner_lock);
   1491	binder_thread_dec_tmpref(from);
   1492	return NULL;
   1493}
   1494
   1495/**
   1496 * binder_free_txn_fixups() - free unprocessed fd fixups
    1497 * @t:	binder transaction whose fd fixups are to be freed
   1498 *
   1499 * If the transaction is being torn down prior to being
   1500 * processed by the target process, free all of the
   1501 * fd fixups and fput the file structs. It is safe to
   1502 * call this function after the fixups have been
   1503 * processed -- in that case, the list will be empty.
   1504 */
   1505static void binder_free_txn_fixups(struct binder_transaction *t)
   1506{
   1507	struct binder_txn_fd_fixup *fixup, *tmp;
   1508
   1509	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
   1510		fput(fixup->file);
   1511		if (fixup->target_fd >= 0)
   1512			put_unused_fd(fixup->target_fd);
   1513		list_del(&fixup->fixup_entry);
   1514		kfree(fixup);
   1515	}
   1516}
   1517
   1518static void binder_txn_latency_free(struct binder_transaction *t)
   1519{
   1520	int from_proc, from_thread, to_proc, to_thread;
   1521
   1522	spin_lock(&t->lock);
   1523	from_proc = t->from ? t->from->proc->pid : 0;
   1524	from_thread = t->from ? t->from->pid : 0;
   1525	to_proc = t->to_proc ? t->to_proc->pid : 0;
   1526	to_thread = t->to_thread ? t->to_thread->pid : 0;
   1527	spin_unlock(&t->lock);
   1528
   1529	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
   1530}
   1531
   1532static void binder_free_transaction(struct binder_transaction *t)
   1533{
   1534	struct binder_proc *target_proc = t->to_proc;
   1535
   1536	if (target_proc) {
   1537		binder_inner_proc_lock(target_proc);
   1538		target_proc->outstanding_txns--;
   1539		if (target_proc->outstanding_txns < 0)
   1540			pr_warn("%s: Unexpected outstanding_txns %d\n",
   1541				__func__, target_proc->outstanding_txns);
   1542		if (!target_proc->outstanding_txns && target_proc->is_frozen)
   1543			wake_up_interruptible_all(&target_proc->freeze_wait);
   1544		if (t->buffer)
   1545			t->buffer->transaction = NULL;
   1546		binder_inner_proc_unlock(target_proc);
   1547	}
   1548	if (trace_binder_txn_latency_free_enabled())
   1549		binder_txn_latency_free(t);
   1550	/*
   1551	 * If the transaction has no target_proc, then
   1552	 * t->buffer->transaction has already been cleared.
   1553	 */
   1554	binder_free_txn_fixups(t);
   1555	kfree(t);
   1556	binder_stats_deleted(BINDER_STAT_TRANSACTION);
   1557}
   1558
   1559static void binder_send_failed_reply(struct binder_transaction *t,
   1560				     uint32_t error_code)
   1561{
   1562	struct binder_thread *target_thread;
   1563	struct binder_transaction *next;
   1564
   1565	BUG_ON(t->flags & TF_ONE_WAY);
   1566	while (1) {
   1567		target_thread = binder_get_txn_from_and_acq_inner(t);
   1568		if (target_thread) {
   1569			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
   1570				     "send failed reply for transaction %d to %d:%d\n",
   1571				      t->debug_id,
   1572				      target_thread->proc->pid,
   1573				      target_thread->pid);
   1574
   1575			binder_pop_transaction_ilocked(target_thread, t);
   1576			if (target_thread->reply_error.cmd == BR_OK) {
   1577				target_thread->reply_error.cmd = error_code;
   1578				binder_enqueue_thread_work_ilocked(
   1579					target_thread,
   1580					&target_thread->reply_error.work);
   1581				wake_up_interruptible(&target_thread->wait);
   1582			} else {
   1583				/*
   1584				 * Cannot get here for normal operation, but
   1585				 * we can if multiple synchronous transactions
   1586				 * are sent without blocking for responses.
   1587				 * Just ignore the 2nd error in this case.
   1588				 */
   1589				pr_warn("Unexpected reply error: %u\n",
   1590					target_thread->reply_error.cmd);
   1591			}
   1592			binder_inner_proc_unlock(target_thread->proc);
   1593			binder_thread_dec_tmpref(target_thread);
   1594			binder_free_transaction(t);
   1595			return;
   1596		}
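		/* annotation for sparse */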
   1597		__release(&target_thread->proc->inner_lock);
   1598		next = t->from_parent;
   1599
   1600		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
   1601			     "send failed reply for transaction %d, target dead\n",
   1602			     t->debug_id);
   1603
   1604		binder_free_transaction(t);
   1605		if (next == NULL) {
   1606			binder_debug(BINDER_DEBUG_DEAD_BINDER,
   1607				     "reply failed, no target thread at root\n");
   1608			return;
   1609		}
   1610		t = next;
   1611		binder_debug(BINDER_DEBUG_DEAD_BINDER,
   1612			     "reply failed, no target thread -- retry %d\n",
   1613			      t->debug_id);
   1614	}
   1615}
   1616
   1617/**
   1618 * binder_cleanup_transaction() - cleans up undelivered transaction
   1619 * @t:		transaction that needs to be cleaned up
   1620 * @reason:	reason the transaction wasn't delivered
   1621 * @error_code:	error to return to caller (if synchronous call)
   1622 */
   1623static void binder_cleanup_transaction(struct binder_transaction *t,
   1624				       const char *reason,
   1625				       uint32_t error_code)
   1626{
   1627	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
   1628		binder_send_failed_reply(t, error_code);
   1629	} else {
   1630		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
   1631			"undelivered transaction %d, %s\n",
   1632			t->debug_id, reason);
   1633		binder_free_transaction(t);
   1634	}
   1635}
   1636
   1637/**
   1638 * binder_get_object() - gets object and checks for valid metadata
   1639 * @proc:	binder_proc owning the buffer
   1640 * @u:		sender's user pointer to base of buffer
   1641 * @buffer:	binder_buffer that we're parsing.
   1642 * @offset:	offset in the @buffer at which to validate an object.
   1643 * @object:	struct binder_object to read into
   1644 *
   1645 * Copy the binder object at the given offset into @object. If @u is
   1646 * provided then the copy is from the sender's buffer. If not, then
   1647 * it is copied from the target's @buffer.
   1648 *
   1649 * Return:	If there's a valid metadata object at @offset, the
   1650 *		size of that object. Otherwise, it returns zero. The object
   1651 *		is read into the struct binder_object pointed to by @object.
   1652 */
   1653static size_t binder_get_object(struct binder_proc *proc,
   1654				const void __user *u,
   1655				struct binder_buffer *buffer,
   1656				unsigned long offset,
   1657				struct binder_object *object)
   1658{
   1659	size_t read_size;
   1660	struct binder_object_header *hdr;
   1661	size_t object_size = 0;
   1662
   1663	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
   1664	if (offset > buffer->data_size || read_size < sizeof(*hdr))
   1665		return 0;
   1666	if (u) {
   1667		if (copy_from_user(object, u + offset, read_size))
   1668			return 0;
   1669	} else {
   1670		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
   1671						  offset, read_size))
   1672			return 0;
   1673	}
   1674
   1675	/* Ok, now see if we read a complete object. */
   1676	hdr = &object->hdr;
   1677	switch (hdr->type) {
   1678	case BINDER_TYPE_BINDER:
   1679	case BINDER_TYPE_WEAK_BINDER:
   1680	case BINDER_TYPE_HANDLE:
   1681	case BINDER_TYPE_WEAK_HANDLE:
   1682		object_size = sizeof(struct flat_binder_object);
   1683		break;
   1684	case BINDER_TYPE_FD:
   1685		object_size = sizeof(struct binder_fd_object);
   1686		break;
   1687	case BINDER_TYPE_PTR:
   1688		object_size = sizeof(struct binder_buffer_object);
   1689		break;
   1690	case BINDER_TYPE_FDA:
   1691		object_size = sizeof(struct binder_fd_array_object);
   1692		break;
   1693	default:
   1694		return 0;
   1695	}
   1696	if (offset <= buffer->data_size - object_size &&
   1697	    buffer->data_size >= object_size)
   1698		return object_size;
   1699	else
   1700		return 0;
   1701}
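
/*
 * Illustrative sketch (editor's note, not driver code): the tail check in
 * binder_get_object() above is deliberately written to avoid integer
 * overflow. A naive bounds check such as "offset + object_size <=
 * buffer->data_size" can wrap around when @offset is close to SIZE_MAX and
 * wrongly accept an out-of-bounds object. Verifying data_size >=
 * object_size first makes the subtraction safe:
 */
static inline bool __maybe_unused
example_object_in_bounds(size_t offset, size_t object_size, size_t data_size)
{
	/* the subtraction cannot wrap once data_size >= object_size holds */
	return data_size >= object_size && offset <= data_size - object_size;
}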
   1702
   1703/**
   1704 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
   1705 * @proc:	binder_proc owning the buffer
   1706 * @b:		binder_buffer containing the object
   1707 * @object:	struct binder_object to read into
   1708 * @index:	index in offset array at which the binder_buffer_object is
   1709 *		located
   1710 * @start_offset: points to the start of the offset array
   1711 * @object_offsetp: offset of @object read from @b
   1712 * @num_valid:	the number of valid offsets in the offset array
   1713 *
   1714 * Return:	If @index is within the valid range of the offset array
    1715 *		described by @start_offset and @num_valid, and if there's a valid
   1716 *		binder_buffer_object at the offset found in index @index
   1717 *		of the offset array, that object is returned. Otherwise,
   1718 *		%NULL is returned.
   1719 *		Note that the offset found in index @index itself is not
   1720 *		verified; this function assumes that @num_valid elements
   1721 *		from @start were previously verified to have valid offsets.
   1722 *		If @object_offsetp is non-NULL, then the offset within
   1723 *		@b is written to it.
   1724 */
   1725static struct binder_buffer_object *binder_validate_ptr(
   1726						struct binder_proc *proc,
   1727						struct binder_buffer *b,
   1728						struct binder_object *object,
   1729						binder_size_t index,
   1730						binder_size_t start_offset,
   1731						binder_size_t *object_offsetp,
   1732						binder_size_t num_valid)
   1733{
   1734	size_t object_size;
   1735	binder_size_t object_offset;
   1736	unsigned long buffer_offset;
   1737
   1738	if (index >= num_valid)
   1739		return NULL;
   1740
   1741	buffer_offset = start_offset + sizeof(binder_size_t) * index;
   1742	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
   1743					  b, buffer_offset,
   1744					  sizeof(object_offset)))
   1745		return NULL;
   1746	object_size = binder_get_object(proc, NULL, b, object_offset, object);
   1747	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
   1748		return NULL;
   1749	if (object_offsetp)
   1750		*object_offsetp = object_offset;
   1751
   1752	return &object->bbo;
   1753}
   1754
   1755/**
   1756 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
   1757 * @proc:		binder_proc owning the buffer
   1758 * @b:			transaction buffer
   1759 * @objects_start_offset: offset to start of objects buffer
   1760 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
   1761 * @fixup_offset:	start offset in @buffer to fix up
   1762 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
   1763 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
   1764 *
    1765 * Return:		%true if a fixup in buffer @b at offset @fixup_offset is
   1766 *			allowed.
   1767 *
   1768 * For safety reasons, we only allow fixups inside a buffer to happen
   1769 * at increasing offsets; additionally, we only allow fixup on the last
   1770 * buffer object that was verified, or one of its parents.
   1771 *
   1772 * Example of what is allowed:
   1773 *
   1774 * A
   1775 *   B (parent = A, offset = 0)
   1776 *   C (parent = A, offset = 16)
   1777 *     D (parent = C, offset = 0)
   1778 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
   1779 *
   1780 * Examples of what is not allowed:
   1781 *
   1782 * Decreasing offsets within the same parent:
   1783 * A
   1784 *   C (parent = A, offset = 16)
   1785 *   B (parent = A, offset = 0) // decreasing offset within A
   1786 *
   1787 * Referring to a parent that wasn't the last object or any of its parents:
   1788 * A
   1789 *   B (parent = A, offset = 0)
    1790 *   C (parent = A, offset = 16)
    1791 *     D (parent = B, offset = 0) // B is not C (the last object) or C's parent A
   1793 */
   1794static bool binder_validate_fixup(struct binder_proc *proc,
   1795				  struct binder_buffer *b,
   1796				  binder_size_t objects_start_offset,
   1797				  binder_size_t buffer_obj_offset,
   1798				  binder_size_t fixup_offset,
   1799				  binder_size_t last_obj_offset,
   1800				  binder_size_t last_min_offset)
   1801{
   1802	if (!last_obj_offset) {
    1803		/* No objects verified yet, so nothing may be fixed up */
   1804		return false;
   1805	}
   1806
   1807	while (last_obj_offset != buffer_obj_offset) {
   1808		unsigned long buffer_offset;
   1809		struct binder_object last_object;
   1810		struct binder_buffer_object *last_bbo;
   1811		size_t object_size = binder_get_object(proc, NULL, b,
   1812						       last_obj_offset,
   1813						       &last_object);
   1814		if (object_size != sizeof(*last_bbo))
   1815			return false;
   1816
   1817		last_bbo = &last_object.bbo;
   1818		/*
   1819		 * Safe to retrieve the parent of last_obj, since it
   1820		 * was already previously verified by the driver.
   1821		 */
   1822		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
   1823			return false;
   1824		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
   1825		buffer_offset = objects_start_offset +
   1826			sizeof(binder_size_t) * last_bbo->parent;
   1827		if (binder_alloc_copy_from_buffer(&proc->alloc,
   1828						  &last_obj_offset,
   1829						  b, buffer_offset,
   1830						  sizeof(last_obj_offset)))
   1831			return false;
   1832	}
   1833	return (fixup_offset >= last_min_offset);
   1834}
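
/*
 * Illustrative sketch (editor's note): a sender-side layout that satisfies
 * the ordering rules documented above. B fixes up offset 0 of A, then C
 * fixes up offset 16 of A; fixup offsets within the same parent strictly
 * increase. The index and offset values are assumptions for illustration.
 */
static void __maybe_unused
example_in_order_fixups(struct binder_buffer_object *b,
			struct binder_buffer_object *c)
{
	b->flags = BINDER_BUFFER_FLAG_HAS_PARENT;
	b->parent = 0;		/* index of A in the offsets array */
	b->parent_offset = 0;	/* first fixup inside A */

	c->flags = BINDER_BUFFER_FLAG_HAS_PARENT;
	c->parent = 0;		/* still fixing up A ... */
	c->parent_offset = 16;	/* ... at a strictly higher offset */
}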
   1835
   1836/**
   1837 * struct binder_task_work_cb - for deferred close
   1838 *
   1839 * @twork:                callback_head for task work
    1840 * @file:                 file to put in binder_do_fd_close()
   1841 *
   1842 * Structure to pass task work to be handled after
   1843 * returning from binder_ioctl() via task_work_add().
   1844 */
   1845struct binder_task_work_cb {
   1846	struct callback_head twork;
   1847	struct file *file;
   1848};
   1849
   1850/**
   1851 * binder_do_fd_close() - close list of file descriptors
   1852 * @twork:	callback head for task work
   1853 *
   1854 * It is not safe to call ksys_close() during the binder_ioctl()
   1855 * function if there is a chance that binder's own file descriptor
   1856 * might be closed. This is to meet the requirements for using
   1857 * fdget() (see comments for __fget_light()). Therefore use
   1858 * task_work_add() to schedule the close operation once we have
    1859 * returned from binder_ioctl(). This function is a callback
    1860 * for that mechanism and does the final fput() on the
    1861 * file backing the given file descriptor.
   1862 */
   1863static void binder_do_fd_close(struct callback_head *twork)
   1864{
   1865	struct binder_task_work_cb *twcb = container_of(twork,
   1866			struct binder_task_work_cb, twork);
   1867
   1868	fput(twcb->file);
   1869	kfree(twcb);
   1870}
   1871
   1872/**
   1873 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
   1874 * @fd:		file-descriptor to close
   1875 *
   1876 * See comments in binder_do_fd_close(). This function is used to schedule
   1877 * a file-descriptor to be closed after returning from binder_ioctl().
   1878 */
   1879static void binder_deferred_fd_close(int fd)
   1880{
   1881	struct binder_task_work_cb *twcb;
   1882
   1883	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
   1884	if (!twcb)
   1885		return;
   1886	init_task_work(&twcb->twork, binder_do_fd_close);
   1887	twcb->file = close_fd_get_file(fd);
   1888	if (twcb->file) {
   1889		// pin it until binder_do_fd_close(); see comments there
   1890		get_file(twcb->file);
   1891		filp_close(twcb->file, current->files);
   1892		task_work_add(current, &twcb->twork, TWA_RESUME);
   1893	} else {
   1894		kfree(twcb);
   1895	}
   1896}
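
/*
 * Illustrative sketch (editor's note): the generic task_work pattern used
 * above. Work queued with task_work_add(current, ..., TWA_RESUME) runs in
 * process context when the task returns to user space, which is what makes
 * it safe to release the file there instead of inside binder_ioctl().
 * All example_* names are hypothetical.
 */
struct example_cb {
	struct callback_head twork;
};

static void example_cb_func(struct callback_head *twork)
{
	struct example_cb *cb = container_of(twork, struct example_cb, twork);

	/* runs on return to user space; release resources here */
	kfree(cb);
}

static int __maybe_unused example_defer_work(void)
{
	struct example_cb *cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return -ENOMEM;
	init_task_work(&cb->twork, example_cb_func);
	if (task_work_add(current, &cb->twork, TWA_RESUME)) {
		kfree(cb);	/* task is already exiting */
		return -ESRCH;
	}
	return 0;
}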
   1897
   1898static void binder_transaction_buffer_release(struct binder_proc *proc,
   1899					      struct binder_thread *thread,
   1900					      struct binder_buffer *buffer,
   1901					      binder_size_t failed_at,
   1902					      bool is_failure)
   1903{
   1904	int debug_id = buffer->debug_id;
   1905	binder_size_t off_start_offset, buffer_offset, off_end_offset;
   1906
   1907	binder_debug(BINDER_DEBUG_TRANSACTION,
   1908		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
   1909		     proc->pid, buffer->debug_id,
   1910		     buffer->data_size, buffer->offsets_size,
   1911		     (unsigned long long)failed_at);
   1912
   1913	if (buffer->target_node)
   1914		binder_dec_node(buffer->target_node, 1, 0);
   1915
   1916	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
   1917	off_end_offset = is_failure && failed_at ? failed_at :
   1918				off_start_offset + buffer->offsets_size;
   1919	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
   1920	     buffer_offset += sizeof(binder_size_t)) {
   1921		struct binder_object_header *hdr;
   1922		size_t object_size = 0;
   1923		struct binder_object object;
   1924		binder_size_t object_offset;
   1925
   1926		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
   1927						   buffer, buffer_offset,
   1928						   sizeof(object_offset)))
   1929			object_size = binder_get_object(proc, NULL, buffer,
   1930							object_offset, &object);
   1931		if (object_size == 0) {
   1932			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
   1933			       debug_id, (u64)object_offset, buffer->data_size);
   1934			continue;
   1935		}
   1936		hdr = &object.hdr;
   1937		switch (hdr->type) {
   1938		case BINDER_TYPE_BINDER:
   1939		case BINDER_TYPE_WEAK_BINDER: {
   1940			struct flat_binder_object *fp;
   1941			struct binder_node *node;
   1942
   1943			fp = to_flat_binder_object(hdr);
   1944			node = binder_get_node(proc, fp->binder);
   1945			if (node == NULL) {
   1946				pr_err("transaction release %d bad node %016llx\n",
   1947				       debug_id, (u64)fp->binder);
   1948				break;
   1949			}
   1950			binder_debug(BINDER_DEBUG_TRANSACTION,
   1951				     "        node %d u%016llx\n",
   1952				     node->debug_id, (u64)node->ptr);
   1953			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
   1954					0);
   1955			binder_put_node(node);
   1956		} break;
   1957		case BINDER_TYPE_HANDLE:
   1958		case BINDER_TYPE_WEAK_HANDLE: {
   1959			struct flat_binder_object *fp;
   1960			struct binder_ref_data rdata;
   1961			int ret;
   1962
   1963			fp = to_flat_binder_object(hdr);
   1964			ret = binder_dec_ref_for_handle(proc, fp->handle,
   1965				hdr->type == BINDER_TYPE_HANDLE, &rdata);
   1966
   1967			if (ret) {
   1968				pr_err("transaction release %d bad handle %d, ret = %d\n",
   1969				 debug_id, fp->handle, ret);
   1970				break;
   1971			}
   1972			binder_debug(BINDER_DEBUG_TRANSACTION,
   1973				     "        ref %d desc %d\n",
   1974				     rdata.debug_id, rdata.desc);
   1975		} break;
   1976
   1977		case BINDER_TYPE_FD: {
   1978			/*
   1979			 * No need to close the file here since user-space
   1980			 * closes it for successfully delivered
   1981			 * transactions. For transactions that weren't
   1982			 * delivered, the new fd was never allocated so
   1983			 * there is no need to close and the fput on the
   1984			 * file is done when the transaction is torn
   1985			 * down.
   1986			 */
   1987		} break;
   1988		case BINDER_TYPE_PTR:
   1989			/*
   1990			 * Nothing to do here, this will get cleaned up when the
   1991			 * transaction buffer gets freed
   1992			 */
   1993			break;
   1994		case BINDER_TYPE_FDA: {
   1995			struct binder_fd_array_object *fda;
   1996			struct binder_buffer_object *parent;
   1997			struct binder_object ptr_object;
   1998			binder_size_t fda_offset;
   1999			size_t fd_index;
   2000			binder_size_t fd_buf_size;
   2001			binder_size_t num_valid;
   2002
   2003			if (is_failure) {
   2004				/*
   2005				 * The fd fixups have not been applied so no
   2006				 * fds need to be closed.
   2007				 */
   2008				continue;
   2009			}
   2010
   2011			num_valid = (buffer_offset - off_start_offset) /
   2012						sizeof(binder_size_t);
   2013			fda = to_binder_fd_array_object(hdr);
   2014			parent = binder_validate_ptr(proc, buffer, &ptr_object,
   2015						     fda->parent,
   2016						     off_start_offset,
   2017						     NULL,
   2018						     num_valid);
   2019			if (!parent) {
   2020				pr_err("transaction release %d bad parent offset\n",
   2021				       debug_id);
   2022				continue;
   2023			}
   2024			fd_buf_size = sizeof(u32) * fda->num_fds;
   2025			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
   2026				pr_err("transaction release %d invalid number of fds (%lld)\n",
   2027				       debug_id, (u64)fda->num_fds);
   2028				continue;
   2029			}
   2030			if (fd_buf_size > parent->length ||
   2031			    fda->parent_offset > parent->length - fd_buf_size) {
   2032				/* No space for all file descriptors here. */
   2033				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
   2034				       debug_id, (u64)fda->num_fds);
   2035				continue;
   2036			}
   2037			/*
   2038			 * the source data for binder_buffer_object is visible
   2039			 * to user-space and the @buffer element is the user
   2040			 * pointer to the buffer_object containing the fd_array.
   2041			 * Convert the address to an offset relative to
   2042			 * the base of the transaction buffer.
   2043			 */
   2044			fda_offset =
   2045			    (parent->buffer - (uintptr_t)buffer->user_data) +
   2046			    fda->parent_offset;
   2047			for (fd_index = 0; fd_index < fda->num_fds;
   2048			     fd_index++) {
   2049				u32 fd;
   2050				int err;
   2051				binder_size_t offset = fda_offset +
   2052					fd_index * sizeof(fd);
   2053
   2054				err = binder_alloc_copy_from_buffer(
   2055						&proc->alloc, &fd, buffer,
   2056						offset, sizeof(fd));
   2057				WARN_ON(err);
   2058				if (!err) {
   2059					binder_deferred_fd_close(fd);
   2060					/*
   2061					 * Need to make sure the thread goes
   2062					 * back to userspace to complete the
   2063					 * deferred close
   2064					 */
   2065					if (thread)
   2066						thread->looper_need_return = true;
   2067				}
   2068			}
   2069		} break;
   2070		default:
   2071			pr_err("transaction release %d bad object type %x\n",
   2072				debug_id, hdr->type);
   2073			break;
   2074		}
   2075	}
   2076}
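
/*
 * Illustrative sketch (editor's note): the address-to-offset conversion
 * used for the fd array above. @parent->buffer holds a user-space address
 * inside the mapped transaction buffer, so subtracting the buffer's base
 * user address yields an offset usable with the binder_alloc copy helpers.
 * The helper name is hypothetical.
 */
static binder_size_t __maybe_unused
example_fda_offset(const struct binder_buffer_object *parent,
		   const struct binder_buffer *buffer,
		   const struct binder_fd_array_object *fda)
{
	return (parent->buffer - (uintptr_t)buffer->user_data) +
		fda->parent_offset;
}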
   2077
   2078static int binder_translate_binder(struct flat_binder_object *fp,
   2079				   struct binder_transaction *t,
   2080				   struct binder_thread *thread)
   2081{
   2082	struct binder_node *node;
   2083	struct binder_proc *proc = thread->proc;
   2084	struct binder_proc *target_proc = t->to_proc;
   2085	struct binder_ref_data rdata;
   2086	int ret = 0;
   2087
   2088	node = binder_get_node(proc, fp->binder);
   2089	if (!node) {
   2090		node = binder_new_node(proc, fp);
   2091		if (!node)
   2092			return -ENOMEM;
   2093	}
   2094	if (fp->cookie != node->cookie) {
   2095		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
   2096				  proc->pid, thread->pid, (u64)fp->binder,
   2097				  node->debug_id, (u64)fp->cookie,
   2098				  (u64)node->cookie);
   2099		ret = -EINVAL;
   2100		goto done;
   2101	}
   2102	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
   2103		ret = -EPERM;
   2104		goto done;
   2105	}
   2106
   2107	ret = binder_inc_ref_for_node(target_proc, node,
   2108			fp->hdr.type == BINDER_TYPE_BINDER,
   2109			&thread->todo, &rdata);
   2110	if (ret)
   2111		goto done;
   2112
   2113	if (fp->hdr.type == BINDER_TYPE_BINDER)
   2114		fp->hdr.type = BINDER_TYPE_HANDLE;
   2115	else
   2116		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
   2117	fp->binder = 0;
   2118	fp->handle = rdata.desc;
   2119	fp->cookie = 0;
   2120
   2121	trace_binder_transaction_node_to_ref(t, node, &rdata);
   2122	binder_debug(BINDER_DEBUG_TRANSACTION,
   2123		     "        node %d u%016llx -> ref %d desc %d\n",
   2124		     node->debug_id, (u64)node->ptr,
   2125		     rdata.debug_id, rdata.desc);
   2126done:
   2127	binder_put_node(node);
   2128	return ret;
   2129}
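
/*
 * Illustrative sketch (editor's note): the net effect of the translation
 * above as the target process will read the object. The sender-local
 * @binder and @cookie pointers must never reach the target; they are
 * replaced by a handle (ref descriptor) that is only meaningful in the
 * target process. The helper and @desc are hypothetical.
 */
static void __maybe_unused
example_node_to_ref(struct flat_binder_object *fp, u32 desc)
{
	fp->hdr.type = (fp->hdr.type == BINDER_TYPE_BINDER) ?
			BINDER_TYPE_HANDLE : BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;		/* scrub the sender's pointer ... */
	fp->handle = desc;	/* ... and substitute the target's descriptor */
	fp->cookie = 0;
}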
   2130
   2131static int binder_translate_handle(struct flat_binder_object *fp,
   2132				   struct binder_transaction *t,
   2133				   struct binder_thread *thread)
   2134{
   2135	struct binder_proc *proc = thread->proc;
   2136	struct binder_proc *target_proc = t->to_proc;
   2137	struct binder_node *node;
   2138	struct binder_ref_data src_rdata;
   2139	int ret = 0;
   2140
   2141	node = binder_get_node_from_ref(proc, fp->handle,
   2142			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
   2143	if (!node) {
   2144		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
   2145				  proc->pid, thread->pid, fp->handle);
   2146		return -EINVAL;
   2147	}
   2148	if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
   2149		ret = -EPERM;
   2150		goto done;
   2151	}
   2152
   2153	binder_node_lock(node);
   2154	if (node->proc == target_proc) {
   2155		if (fp->hdr.type == BINDER_TYPE_HANDLE)
   2156			fp->hdr.type = BINDER_TYPE_BINDER;
   2157		else
   2158			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
   2159		fp->binder = node->ptr;
   2160		fp->cookie = node->cookie;
   2161		if (node->proc)
   2162			binder_inner_proc_lock(node->proc);
   2163		else
   2164			__acquire(&node->proc->inner_lock);
   2165		binder_inc_node_nilocked(node,
   2166					 fp->hdr.type == BINDER_TYPE_BINDER,
   2167					 0, NULL);
   2168		if (node->proc)
   2169			binder_inner_proc_unlock(node->proc);
   2170		else
   2171			__release(&node->proc->inner_lock);
   2172		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
   2173		binder_debug(BINDER_DEBUG_TRANSACTION,
   2174			     "        ref %d desc %d -> node %d u%016llx\n",
   2175			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
   2176			     (u64)node->ptr);
   2177		binder_node_unlock(node);
   2178	} else {
   2179		struct binder_ref_data dest_rdata;
   2180
   2181		binder_node_unlock(node);
   2182		ret = binder_inc_ref_for_node(target_proc, node,
   2183				fp->hdr.type == BINDER_TYPE_HANDLE,
   2184				NULL, &dest_rdata);
   2185		if (ret)
   2186			goto done;
   2187
   2188		fp->binder = 0;
   2189		fp->handle = dest_rdata.desc;
   2190		fp->cookie = 0;
   2191		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
   2192						    &dest_rdata);
   2193		binder_debug(BINDER_DEBUG_TRANSACTION,
   2194			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
   2195			     src_rdata.debug_id, src_rdata.desc,
   2196			     dest_rdata.debug_id, dest_rdata.desc,
   2197			     node->debug_id);
   2198	}
   2199done:
   2200	binder_put_node(node);
   2201	return ret;
   2202}
   2203
   2204static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
   2205			       struct binder_transaction *t,
   2206			       struct binder_thread *thread,
   2207			       struct binder_transaction *in_reply_to)
   2208{
   2209	struct binder_proc *proc = thread->proc;
   2210	struct binder_proc *target_proc = t->to_proc;
   2211	struct binder_txn_fd_fixup *fixup;
   2212	struct file *file;
   2213	int ret = 0;
   2214	bool target_allows_fd;
   2215
   2216	if (in_reply_to)
   2217		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
   2218	else
   2219		target_allows_fd = t->buffer->target_node->accept_fds;
   2220	if (!target_allows_fd) {
   2221		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
   2222				  proc->pid, thread->pid,
   2223				  in_reply_to ? "reply" : "transaction",
   2224				  fd);
   2225		ret = -EPERM;
   2226		goto err_fd_not_accepted;
   2227	}
   2228
   2229	file = fget(fd);
   2230	if (!file) {
   2231		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
   2232				  proc->pid, thread->pid, fd);
   2233		ret = -EBADF;
   2234		goto err_fget;
   2235	}
   2236	ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
   2237	if (ret < 0) {
   2238		ret = -EPERM;
   2239		goto err_security;
   2240	}
   2241
   2242	/*
   2243	 * Add fixup record for this transaction. The allocation
   2244	 * of the fd in the target needs to be done from a
   2245	 * target thread.
   2246	 */
   2247	fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
   2248	if (!fixup) {
   2249		ret = -ENOMEM;
   2250		goto err_alloc;
   2251	}
   2252	fixup->file = file;
   2253	fixup->offset = fd_offset;
   2254	fixup->target_fd = -1;
   2255	trace_binder_transaction_fd_send(t, fd, fixup->offset);
   2256	list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
   2257
   2258	return ret;
   2259
   2260err_alloc:
   2261err_security:
   2262	fput(file);
   2263err_fget:
   2264err_fd_not_accepted:
   2265	return ret;
   2266}
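
/*
 * Illustrative sketch (editor's note): what the target side conceptually
 * does with the fixup queued above once one of its threads picks up the
 * transaction (the real fixup application lives elsewhere in this file).
 * The helper name is hypothetical.
 */
static int __maybe_unused
example_apply_fd_fixup(struct binder_txn_fd_fixup *fixup)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;
	/* the new fd is only valid in the target process ... */
	fd_install(fd, fixup->file);
	/* ... and is written into the buffer at fixup->offset */
	return fd;
}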
   2267
   2268/**
   2269 * struct binder_ptr_fixup - data to be fixed-up in target buffer
    2270 * @offset:	offset in target buffer to fixup
    2271 * @skip_size:	bytes to skip in copy (fixup will be written later)
    2272 * @fixup_data:	data to write at fixup offset
    2273 * @node:	list node
   2274 *
   2275 * This is used for the pointer fixup list (pf) which is created and consumed
   2276 * during binder_transaction() and is only accessed locally. No
   2277 * locking is necessary.
   2278 *
   2279 * The list is ordered by @offset.
   2280 */
   2281struct binder_ptr_fixup {
   2282	binder_size_t offset;
   2283	size_t skip_size;
   2284	binder_uintptr_t fixup_data;
   2285	struct list_head node;
   2286};
   2287
   2288/**
   2289 * struct binder_sg_copy - scatter-gather data to be copied
    2290 * @offset:		offset in target buffer
    2291 * @sender_uaddr:	user address in source buffer
    2292 * @length:		bytes to copy
    2293 * @node:		list node
   2294 *
   2295 * This is used for the sg copy list (sgc) which is created and consumed
   2296 * during binder_transaction() and is only accessed locally. No
   2297 * locking is necessary.
   2298 *
   2299 * The list is ordered by @offset.
   2300 */
   2301struct binder_sg_copy {
   2302	binder_size_t offset;
   2303	const void __user *sender_uaddr;
   2304	size_t length;
   2305	struct list_head node;
   2306};
   2307
   2308/**
   2309 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
   2310 * @alloc:	binder_alloc associated with @buffer
   2311 * @buffer:	binder buffer in target process
   2312 * @sgc_head:	list_head of scatter-gather copy list
   2313 * @pf_head:	list_head of pointer fixup list
   2314 *
   2315 * Processes all elements of @sgc_head, applying fixups from @pf_head
   2316 * and copying the scatter-gather data from the source process' user
   2317 * buffer to the target's buffer. It is expected that the list creation
   2318 * and processing all occurs during binder_transaction() so these lists
   2319 * are only accessed in local context.
   2320 *
   2321 * Return: 0=success, else -errno
   2322 */
   2323static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
   2324					 struct binder_buffer *buffer,
   2325					 struct list_head *sgc_head,
   2326					 struct list_head *pf_head)
   2327{
   2328	int ret = 0;
   2329	struct binder_sg_copy *sgc, *tmpsgc;
   2330	struct binder_ptr_fixup *tmppf;
   2331	struct binder_ptr_fixup *pf =
   2332		list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
   2333					 node);
   2334
   2335	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
   2336		size_t bytes_copied = 0;
   2337
   2338		while (bytes_copied < sgc->length) {
   2339			size_t copy_size;
   2340			size_t bytes_left = sgc->length - bytes_copied;
   2341			size_t offset = sgc->offset + bytes_copied;
   2342
   2343			/*
   2344			 * We copy up to the fixup (pointed to by pf)
   2345			 */
   2346			copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
   2347				       : bytes_left;
   2348			if (!ret && copy_size)
   2349				ret = binder_alloc_copy_user_to_buffer(
   2350						alloc, buffer,
   2351						offset,
   2352						sgc->sender_uaddr + bytes_copied,
   2353						copy_size);
   2354			bytes_copied += copy_size;
   2355			if (copy_size != bytes_left) {
   2356				BUG_ON(!pf);
   2357				/* we stopped at a fixup offset */
   2358				if (pf->skip_size) {
   2359					/*
   2360					 * we are just skipping. This is for
   2361					 * BINDER_TYPE_FDA where the translated
   2362					 * fds will be fixed up when we get
   2363					 * to target context.
   2364					 */
   2365					bytes_copied += pf->skip_size;
   2366				} else {
   2367					/* apply the fixup indicated by pf */
   2368					if (!ret)
   2369						ret = binder_alloc_copy_to_buffer(
   2370							alloc, buffer,
   2371							pf->offset,
   2372							&pf->fixup_data,
   2373							sizeof(pf->fixup_data));
   2374					bytes_copied += sizeof(pf->fixup_data);
   2375				}
   2376				list_del(&pf->node);
   2377				kfree(pf);
   2378				pf = list_first_entry_or_null(pf_head,
   2379						struct binder_ptr_fixup, node);
   2380			}
   2381		}
   2382		list_del(&sgc->node);
   2383		kfree(sgc);
   2384	}
   2385	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
   2386		BUG_ON(pf->skip_size == 0);
   2387		list_del(&pf->node);
   2388		kfree(pf);
   2389	}
   2390	BUG_ON(!list_empty(sgc_head));
   2391
   2392	return ret > 0 ? -EINVAL : ret;
   2393}
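
/*
 * Illustrative sketch (editor's note): the core of the loop above on a
 * plain memory buffer with a single fixup. Bytes before the fixup come
 * from the source, the fixup region receives the translated value, and
 * the copy resumes after it. Assumes fix_off + sizeof(fix_val) <= len;
 * all names are hypothetical.
 */
static void __maybe_unused
example_copy_with_fixup(u8 *dst, const u8 *src, size_t len,
			size_t fix_off, u64 fix_val)
{
	/* copy the source bytes up to the fixup offset */
	memcpy(dst, src, fix_off);
	/* write the fixed-up value instead of the source bytes */
	memcpy(dst + fix_off, &fix_val, sizeof(fix_val));
	/* skip the patched region in the source and keep copying */
	memcpy(dst + fix_off + sizeof(fix_val),
	       src + fix_off + sizeof(fix_val),
	       len - fix_off - sizeof(fix_val));
}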
   2394
   2395/**
   2396 * binder_cleanup_deferred_txn_lists() - free specified lists
   2397 * @sgc_head:	list_head of scatter-gather copy list
   2398 * @pf_head:	list_head of pointer fixup list
   2399 *
   2400 * Called to clean up @sgc_head and @pf_head if there is an
   2401 * error.
   2402 */
   2403static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
   2404					      struct list_head *pf_head)
   2405{
   2406	struct binder_sg_copy *sgc, *tmpsgc;
   2407	struct binder_ptr_fixup *pf, *tmppf;
   2408
   2409	list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
   2410		list_del(&sgc->node);
   2411		kfree(sgc);
   2412	}
   2413	list_for_each_entry_safe(pf, tmppf, pf_head, node) {
   2414		list_del(&pf->node);
   2415		kfree(pf);
   2416	}
   2417}
   2418
   2419/**
   2420 * binder_defer_copy() - queue a scatter-gather buffer for copy
   2421 * @sgc_head:		list_head of scatter-gather copy list
   2422 * @offset:		binder buffer offset in target process
   2423 * @sender_uaddr:	user address in source process
   2424 * @length:		bytes to copy
   2425 *
   2426 * Specify a scatter-gather block to be copied. The actual copy must
   2427 * be deferred until all the needed fixups are identified and queued.
   2428 * Then the copy and fixups are done together so un-translated values
   2429 * from the source are never visible in the target buffer.
   2430 *
   2431 * We are guaranteed that repeated calls to this function will have
   2432 * monotonically increasing @offset values so the list will naturally
   2433 * be ordered.
   2434 *
   2435 * Return: 0=success, else -errno
   2436 */
   2437static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
   2438			     const void __user *sender_uaddr, size_t length)
   2439{
   2440	struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
   2441
   2442	if (!bc)
   2443		return -ENOMEM;
   2444
   2445	bc->offset = offset;
   2446	bc->sender_uaddr = sender_uaddr;
   2447	bc->length = length;
   2448	INIT_LIST_HEAD(&bc->node);
   2449
   2450	/*
   2451	 * We are guaranteed that the deferred copies are in-order
   2452	 * so just add to the tail.
   2453	 */
   2454	list_add_tail(&bc->node, sgc_head);
   2455
   2456	return 0;
   2457}
   2458
   2459/**
   2460 * binder_add_fixup() - queue a fixup to be applied to sg copy
   2461 * @pf_head:	list_head of binder ptr fixup list
   2462 * @offset:	binder buffer offset in target process
   2463 * @fixup:	bytes to be copied for fixup
   2464 * @skip_size:	bytes to skip when copying (fixup will be applied later)
   2465 *
   2466 * Add the specified fixup to a list ordered by @offset. When copying
   2467 * the scatter-gather buffers, the fixup will be copied instead of
   2468 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
   2469 * will be applied later (in target process context), so we just skip
   2470 * the bytes specified by @skip_size. If @skip_size is 0, we copy the
   2471 * value in @fixup.
   2472 *
   2473 * This function is called *mostly* in @offset order, but there are
   2474 * exceptions. Since out-of-order inserts are relatively uncommon,
   2475 * we insert the new element by searching backward from the tail of
   2476 * the list.
   2477 *
   2478 * Return: 0=success, else -errno
   2479 */
   2480static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
   2481			    binder_uintptr_t fixup, size_t skip_size)
   2482{
   2483	struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
   2484	struct binder_ptr_fixup *tmppf;
   2485
   2486	if (!pf)
   2487		return -ENOMEM;
   2488
   2489	pf->offset = offset;
   2490	pf->fixup_data = fixup;
   2491	pf->skip_size = skip_size;
   2492	INIT_LIST_HEAD(&pf->node);
   2493
   2494	/* Fixups are *mostly* added in-order, but there are some
   2495	 * exceptions. Look backwards through list for insertion point.
   2496	 */
   2497	list_for_each_entry_reverse(tmppf, pf_head, node) {
   2498		if (tmppf->offset < pf->offset) {
   2499			list_add(&pf->node, &tmppf->node);
   2500			return 0;
   2501		}
   2502	}
   2503	/*
   2504	 * if we get here, then the new offset is the lowest so
   2505	 * insert at the head
   2506	 */
   2507	list_add(&pf->node, pf_head);
   2508	return 0;
   2509}
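
/*
 * Illustrative sketch (editor's note): the two ways the callers below use
 * the fixup list. A parent-pointer fixup carries the translated pointer
 * value, while an fd-array fixup only reserves (skips) its region because
 * the fds are translated later in target context. The offsets and the
 * pointer value are hypothetical.
 */
static int __maybe_unused example_queue_fixups(struct list_head *pf_head)
{
	int ret;

	/* BINDER_TYPE_PTR: write a translated pointer at offset 0x40 */
	ret = binder_add_fixup(pf_head, 0x40, 0xffff800012345678ULL, 0);
	if (ret)
		return ret;
	/* BINDER_TYPE_FDA: skip four fds' worth of bytes at offset 0x80 */
	return binder_add_fixup(pf_head, 0x80, 0, 4 * sizeof(u32));
}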
   2510
   2511static int binder_translate_fd_array(struct list_head *pf_head,
   2512				     struct binder_fd_array_object *fda,
   2513				     const void __user *sender_ubuffer,
   2514				     struct binder_buffer_object *parent,
   2515				     struct binder_buffer_object *sender_uparent,
   2516				     struct binder_transaction *t,
   2517				     struct binder_thread *thread,
   2518				     struct binder_transaction *in_reply_to)
   2519{
   2520	binder_size_t fdi, fd_buf_size;
   2521	binder_size_t fda_offset;
   2522	const void __user *sender_ufda_base;
   2523	struct binder_proc *proc = thread->proc;
   2524	int ret;
   2525
   2526	if (fda->num_fds == 0)
   2527		return 0;
   2528
   2529	fd_buf_size = sizeof(u32) * fda->num_fds;
   2530	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
   2531		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
   2532				  proc->pid, thread->pid, (u64)fda->num_fds);
   2533		return -EINVAL;
   2534	}
   2535	if (fd_buf_size > parent->length ||
   2536	    fda->parent_offset > parent->length - fd_buf_size) {
   2537		/* No space for all file descriptors here. */
   2538		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
   2539				  proc->pid, thread->pid, (u64)fda->num_fds);
   2540		return -EINVAL;
   2541	}
   2542	/*
   2543	 * the source data for binder_buffer_object is visible
   2544	 * to user-space and the @buffer element is the user
   2545	 * pointer to the buffer_object containing the fd_array.
   2546	 * Convert the address to an offset relative to
   2547	 * the base of the transaction buffer.
   2548	 */
   2549	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
   2550		fda->parent_offset;
   2551	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
   2552				fda->parent_offset;
   2553
   2554	if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
   2555	    !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
   2556		binder_user_error("%d:%d parent offset not aligned correctly.\n",
   2557				  proc->pid, thread->pid);
   2558		return -EINVAL;
   2559	}
   2560	ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
   2561	if (ret)
   2562		return ret;
   2563
   2564	for (fdi = 0; fdi < fda->num_fds; fdi++) {
   2565		u32 fd;
   2566		binder_size_t offset = fda_offset + fdi * sizeof(fd);
   2567		binder_size_t sender_uoffset = fdi * sizeof(fd);
   2568
   2569		ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
   2570		if (!ret)
   2571			ret = binder_translate_fd(fd, offset, t, thread,
   2572						  in_reply_to);
   2573		if (ret)
   2574			return ret > 0 ? -EINVAL : ret;
   2575	}
   2576	return 0;
   2577}
   2578
   2579static int binder_fixup_parent(struct list_head *pf_head,
   2580			       struct binder_transaction *t,
   2581			       struct binder_thread *thread,
   2582			       struct binder_buffer_object *bp,
   2583			       binder_size_t off_start_offset,
   2584			       binder_size_t num_valid,
   2585			       binder_size_t last_fixup_obj_off,
   2586			       binder_size_t last_fixup_min_off)
   2587{
   2588	struct binder_buffer_object *parent;
   2589	struct binder_buffer *b = t->buffer;
   2590	struct binder_proc *proc = thread->proc;
   2591	struct binder_proc *target_proc = t->to_proc;
   2592	struct binder_object object;
   2593	binder_size_t buffer_offset;
   2594	binder_size_t parent_offset;
   2595
   2596	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
   2597		return 0;
   2598
   2599	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
   2600				     off_start_offset, &parent_offset,
   2601				     num_valid);
   2602	if (!parent) {
   2603		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
   2604				  proc->pid, thread->pid);
   2605		return -EINVAL;
   2606	}
   2607
   2608	if (!binder_validate_fixup(target_proc, b, off_start_offset,
   2609				   parent_offset, bp->parent_offset,
   2610				   last_fixup_obj_off,
   2611				   last_fixup_min_off)) {
   2612		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
   2613				  proc->pid, thread->pid);
   2614		return -EINVAL;
   2615	}
   2616
   2617	if (parent->length < sizeof(binder_uintptr_t) ||
   2618	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
   2619		/* No space for a pointer here! */
   2620		binder_user_error("%d:%d got transaction with invalid parent offset\n",
   2621				  proc->pid, thread->pid);
   2622		return -EINVAL;
   2623	}
   2624	buffer_offset = bp->parent_offset +
   2625			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
   2626	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
   2627}
   2628
   2629/**
   2630 * binder_proc_transaction() - sends a transaction to a process and wakes it up
   2631 * @t:		transaction to send
   2632 * @proc:	process to send the transaction to
   2633 * @thread:	thread in @proc to send the transaction to (may be NULL)
   2634 *
   2635 * This function queues a transaction to the specified process. It will try
   2636 * to find a thread in the target process to handle the transaction and
   2637 * wake it up. If no thread is found, the work is queued to the proc
   2638 * waitqueue.
   2639 *
   2640 * If the @thread parameter is not NULL, the transaction is always queued
   2641 * to the waitlist of that specific thread.
   2642 *
   2643 * Return:	0 if the transaction was successfully queued
   2644 *		BR_DEAD_REPLY if the target process or thread is dead
   2645 *		BR_FROZEN_REPLY if the target process or thread is frozen
   2646 */
   2647static int binder_proc_transaction(struct binder_transaction *t,
   2648				    struct binder_proc *proc,
   2649				    struct binder_thread *thread)
   2650{
   2651	struct binder_node *node = t->buffer->target_node;
   2652	bool oneway = !!(t->flags & TF_ONE_WAY);
   2653	bool pending_async = false;
   2654
   2655	BUG_ON(!node);
   2656	binder_node_lock(node);
   2657	if (oneway) {
   2658		BUG_ON(thread);
   2659		if (node->has_async_transaction)
   2660			pending_async = true;
   2661		else
   2662			node->has_async_transaction = true;
   2663	}
   2664
   2665	binder_inner_proc_lock(proc);
   2666	if (proc->is_frozen) {
   2667		proc->sync_recv |= !oneway;
   2668		proc->async_recv |= oneway;
   2669	}
   2670
   2671	if ((proc->is_frozen && !oneway) || proc->is_dead ||
   2672			(thread && thread->is_dead)) {
   2673		binder_inner_proc_unlock(proc);
   2674		binder_node_unlock(node);
   2675		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
   2676	}
   2677
   2678	if (!thread && !pending_async)
   2679		thread = binder_select_thread_ilocked(proc);
   2680
   2681	if (thread)
   2682		binder_enqueue_thread_work_ilocked(thread, &t->work);
   2683	else if (!pending_async)
   2684		binder_enqueue_work_ilocked(&t->work, &proc->todo);
   2685	else
   2686		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
   2687
   2688	if (!pending_async)
   2689		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
   2690
   2691	proc->outstanding_txns++;
   2692	binder_inner_proc_unlock(proc);
   2693	binder_node_unlock(node);
   2694
   2695	return 0;
   2696}
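
/*
 * Illustrative sketch (editor's note): the queueing decision above in
 * condensed form. A oneway transaction whose target node already has an
 * async transaction in flight is parked on the node's async_todo and
 * wakes nobody; otherwise the work goes to a specific or selected thread
 * when one is available, else to the process-wide todo list. All
 * example_* names are hypothetical.
 */
enum example_txn_queue {
	EXAMPLE_Q_THREAD,	/* thread->todo, wake that thread */
	EXAMPLE_Q_PROC,		/* proc->todo, wake a waiting thread/poller */
	EXAMPLE_Q_NODE_ASYNC,	/* node->async_todo, no wakeup yet */
};

static enum example_txn_queue __maybe_unused
example_pick_queue(bool have_thread, bool pending_async)
{
	if (have_thread)
		return EXAMPLE_Q_THREAD;
	return pending_async ? EXAMPLE_Q_NODE_ASYNC : EXAMPLE_Q_PROC;
}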
   2697
   2698/**
   2699 * binder_get_node_refs_for_txn() - Get required refs on node for txn
   2700 * @node:         struct binder_node for which to get refs
    2701 * @procp:        returns @node->proc if valid
    2702 * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
   2703 *
   2704 * User-space normally keeps the node alive when creating a transaction
   2705 * since it has a reference to the target. The local strong ref keeps it
   2706 * alive if the sending process dies before the target process processes
   2707 * the transaction. If the source process is malicious or has a reference
   2708 * counting bug, relying on the local strong ref can fail.
   2709 *
   2710 * Since user-space can cause the local strong ref to go away, we also take
   2711 * a tmpref on the node to ensure it survives while we are constructing
   2712 * the transaction. We also need a tmpref on the proc while we are
   2713 * constructing the transaction, so we take that here as well.
   2714 *
    2715 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
    2716 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
    2717 * target proc has died, @error is set to BR_DEAD_REPLY.
   2718 */
   2719static struct binder_node *binder_get_node_refs_for_txn(
   2720		struct binder_node *node,
   2721		struct binder_proc **procp,
   2722		uint32_t *error)
   2723{
   2724	struct binder_node *target_node = NULL;
   2725
   2726	binder_node_inner_lock(node);
   2727	if (node->proc) {
   2728		target_node = node;
   2729		binder_inc_node_nilocked(node, 1, 0, NULL);
   2730		binder_inc_node_tmpref_ilocked(node);
   2731		node->proc->tmp_ref++;
   2732		*procp = node->proc;
   2733	} else
   2734		*error = BR_DEAD_REPLY;
   2735	binder_node_inner_unlock(node);
   2736
   2737	return target_node;
   2738}
   2739
   2740static void binder_set_txn_from_error(struct binder_transaction *t, int id,
   2741				      uint32_t command, int32_t param)
   2742{
   2743	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
   2744
   2745	if (!from) {
   2746		/* annotation for sparse */
   2747		__release(&from->proc->inner_lock);
   2748		return;
   2749	}
   2750
   2751	/* don't override existing errors */
   2752	if (from->ee.command == BR_OK)
   2753		binder_set_extended_error(&from->ee, id, command, param);
   2754	binder_inner_proc_unlock(from->proc);
   2755	binder_thread_dec_tmpref(from);
   2756}
   2757
   2758static void binder_transaction(struct binder_proc *proc,
   2759			       struct binder_thread *thread,
   2760			       struct binder_transaction_data *tr, int reply,
   2761			       binder_size_t extra_buffers_size)
   2762{
   2763	int ret;
   2764	struct binder_transaction *t;
   2765	struct binder_work *w;
   2766	struct binder_work *tcomplete;
   2767	binder_size_t buffer_offset = 0;
   2768	binder_size_t off_start_offset, off_end_offset;
   2769	binder_size_t off_min;
   2770	binder_size_t sg_buf_offset, sg_buf_end_offset;
   2771	binder_size_t user_offset = 0;
   2772	struct binder_proc *target_proc = NULL;
   2773	struct binder_thread *target_thread = NULL;
   2774	struct binder_node *target_node = NULL;
   2775	struct binder_transaction *in_reply_to = NULL;
   2776	struct binder_transaction_log_entry *e;
   2777	uint32_t return_error = 0;
   2778	uint32_t return_error_param = 0;
   2779	uint32_t return_error_line = 0;
   2780	binder_size_t last_fixup_obj_off = 0;
   2781	binder_size_t last_fixup_min_off = 0;
   2782	struct binder_context *context = proc->context;
   2783	int t_debug_id = atomic_inc_return(&binder_last_id);
   2784	char *secctx = NULL;
   2785	u32 secctx_sz = 0;
   2786	struct list_head sgc_head;
   2787	struct list_head pf_head;
   2788	const void __user *user_buffer = (const void __user *)
   2789				(uintptr_t)tr->data.ptr.buffer;
   2790	INIT_LIST_HEAD(&sgc_head);
   2791	INIT_LIST_HEAD(&pf_head);
   2792
   2793	e = binder_transaction_log_add(&binder_transaction_log);
   2794	e->debug_id = t_debug_id;
   2795	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
   2796	e->from_proc = proc->pid;
   2797	e->from_thread = thread->pid;
   2798	e->target_handle = tr->target.handle;
   2799	e->data_size = tr->data_size;
   2800	e->offsets_size = tr->offsets_size;
   2801	strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
   2802
   2803	binder_inner_proc_lock(proc);
   2804	binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
   2805	binder_inner_proc_unlock(proc);
   2806
   2807	if (reply) {
   2808		binder_inner_proc_lock(proc);
   2809		in_reply_to = thread->transaction_stack;
   2810		if (in_reply_to == NULL) {
   2811			binder_inner_proc_unlock(proc);
   2812			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
   2813					  proc->pid, thread->pid);
   2814			return_error = BR_FAILED_REPLY;
   2815			return_error_param = -EPROTO;
   2816			return_error_line = __LINE__;
   2817			goto err_empty_call_stack;
   2818		}
   2819		if (in_reply_to->to_thread != thread) {
   2820			spin_lock(&in_reply_to->lock);
   2821			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
   2822				proc->pid, thread->pid, in_reply_to->debug_id,
   2823				in_reply_to->to_proc ?
   2824				in_reply_to->to_proc->pid : 0,
   2825				in_reply_to->to_thread ?
   2826				in_reply_to->to_thread->pid : 0);
   2827			spin_unlock(&in_reply_to->lock);
   2828			binder_inner_proc_unlock(proc);
   2829			return_error = BR_FAILED_REPLY;
   2830			return_error_param = -EPROTO;
   2831			return_error_line = __LINE__;
   2832			in_reply_to = NULL;
   2833			goto err_bad_call_stack;
   2834		}
   2835		thread->transaction_stack = in_reply_to->to_parent;
   2836		binder_inner_proc_unlock(proc);
   2837		binder_set_nice(in_reply_to->saved_priority);
   2838		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
   2839		if (target_thread == NULL) {
   2840			/* annotation for sparse */
   2841			__release(&target_thread->proc->inner_lock);
   2842			binder_txn_error("%d:%d reply target not found\n",
   2843				thread->pid, proc->pid);
   2844			return_error = BR_DEAD_REPLY;
   2845			return_error_line = __LINE__;
   2846			goto err_dead_binder;
   2847		}
   2848		if (target_thread->transaction_stack != in_reply_to) {
   2849			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
   2850				proc->pid, thread->pid,
   2851				target_thread->transaction_stack ?
   2852				target_thread->transaction_stack->debug_id : 0,
   2853				in_reply_to->debug_id);
   2854			binder_inner_proc_unlock(target_thread->proc);
   2855			return_error = BR_FAILED_REPLY;
   2856			return_error_param = -EPROTO;
   2857			return_error_line = __LINE__;
   2858			in_reply_to = NULL;
   2859			target_thread = NULL;
   2860			goto err_dead_binder;
   2861		}
   2862		target_proc = target_thread->proc;
   2863		target_proc->tmp_ref++;
   2864		binder_inner_proc_unlock(target_thread->proc);
   2865	} else {
   2866		if (tr->target.handle) {
   2867			struct binder_ref *ref;
   2868
   2869			/*
   2870			 * There must already be a strong ref
   2871			 * on this node. If so, do a strong
   2872			 * increment on the node to ensure it
   2873			 * stays alive until the transaction is
   2874			 * done.
   2875			 */
   2876			binder_proc_lock(proc);
   2877			ref = binder_get_ref_olocked(proc, tr->target.handle,
   2878						     true);
   2879			if (ref) {
   2880				target_node = binder_get_node_refs_for_txn(
   2881						ref->node, &target_proc,
   2882						&return_error);
   2883			} else {
   2884				binder_user_error("%d:%d got transaction to invalid handle, %u\n",
   2885						  proc->pid, thread->pid, tr->target.handle);
   2886				return_error = BR_FAILED_REPLY;
   2887			}
   2888			binder_proc_unlock(proc);
   2889		} else {
   2890			mutex_lock(&context->context_mgr_node_lock);
   2891			target_node = context->binder_context_mgr_node;
   2892			if (target_node)
   2893				target_node = binder_get_node_refs_for_txn(
   2894						target_node, &target_proc,
   2895						&return_error);
   2896			else
   2897				return_error = BR_DEAD_REPLY;
   2898			mutex_unlock(&context->context_mgr_node_lock);
   2899			if (target_node && target_proc->pid == proc->pid) {
   2900				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
   2901						  proc->pid, thread->pid);
   2902				return_error = BR_FAILED_REPLY;
   2903				return_error_param = -EINVAL;
   2904				return_error_line = __LINE__;
   2905				goto err_invalid_target_handle;
   2906			}
   2907		}
   2908		if (!target_node) {
   2909			binder_txn_error("%d:%d cannot find target node\n",
   2910				thread->pid, proc->pid);
   2911			/*
   2912			 * return_error is set above
   2913			 */
   2914			return_error_param = -EINVAL;
   2915			return_error_line = __LINE__;
   2916			goto err_dead_binder;
   2917		}
   2918		e->to_node = target_node->debug_id;
   2919		if (WARN_ON(proc == target_proc)) {
   2920			binder_txn_error("%d:%d self transactions not allowed\n",
   2921				thread->pid, proc->pid);
   2922			return_error = BR_FAILED_REPLY;
   2923			return_error_param = -EINVAL;
   2924			return_error_line = __LINE__;
   2925			goto err_invalid_target_handle;
   2926		}
   2927		if (security_binder_transaction(proc->cred,
   2928						target_proc->cred) < 0) {
   2929			binder_txn_error("%d:%d transaction credentials failed\n",
   2930				thread->pid, proc->pid);
   2931			return_error = BR_FAILED_REPLY;
   2932			return_error_param = -EPERM;
   2933			return_error_line = __LINE__;
   2934			goto err_invalid_target_handle;
   2935		}
   2936		binder_inner_proc_lock(proc);
   2937
   2938		w = list_first_entry_or_null(&thread->todo,
   2939					     struct binder_work, entry);
   2940		if (!(tr->flags & TF_ONE_WAY) && w &&
   2941		    w->type == BINDER_WORK_TRANSACTION) {
   2942			/*
   2943			 * Do not allow new outgoing transaction from a
   2944			 * thread that has a transaction at the head of
   2945			 * its todo list. Only need to check the head
   2946			 * because binder_select_thread_ilocked picks a
   2947			 * thread from proc->waiting_threads to enqueue
   2948			 * the transaction, and nothing is queued to the
   2949			 * todo list while the thread is on waiting_threads.
   2950			 */
   2951			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
   2952					  proc->pid, thread->pid);
   2953			binder_inner_proc_unlock(proc);
   2954			return_error = BR_FAILED_REPLY;
   2955			return_error_param = -EPROTO;
   2956			return_error_line = __LINE__;
   2957			goto err_bad_todo_list;
   2958		}
   2959
   2960		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
   2961			struct binder_transaction *tmp;
   2962
   2963			tmp = thread->transaction_stack;
   2964			if (tmp->to_thread != thread) {
   2965				spin_lock(&tmp->lock);
   2966				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
   2967					proc->pid, thread->pid, tmp->debug_id,
   2968					tmp->to_proc ? tmp->to_proc->pid : 0,
   2969					tmp->to_thread ?
   2970					tmp->to_thread->pid : 0);
   2971				spin_unlock(&tmp->lock);
   2972				binder_inner_proc_unlock(proc);
   2973				return_error = BR_FAILED_REPLY;
   2974				return_error_param = -EPROTO;
   2975				return_error_line = __LINE__;
   2976				goto err_bad_call_stack;
   2977			}
   2978			while (tmp) {
   2979				struct binder_thread *from;
   2980
   2981				spin_lock(&tmp->lock);
   2982				from = tmp->from;
   2983				if (from && from->proc == target_proc) {
   2984					atomic_inc(&from->tmp_ref);
   2985					target_thread = from;
   2986					spin_unlock(&tmp->lock);
   2987					break;
   2988				}
   2989				spin_unlock(&tmp->lock);
   2990				tmp = tmp->from_parent;
   2991			}
   2992		}
   2993		binder_inner_proc_unlock(proc);
   2994	}
   2995	if (target_thread)
   2996		e->to_thread = target_thread->pid;
   2997	e->to_proc = target_proc->pid;
   2998
   2999	/* TODO: reuse incoming transaction for reply */
   3000	t = kzalloc(sizeof(*t), GFP_KERNEL);
   3001	if (t == NULL) {
   3002		binder_txn_error("%d:%d cannot allocate transaction\n",
   3003			thread->pid, proc->pid);
   3004		return_error = BR_FAILED_REPLY;
   3005		return_error_param = -ENOMEM;
   3006		return_error_line = __LINE__;
   3007		goto err_alloc_t_failed;
   3008	}
   3009	INIT_LIST_HEAD(&t->fd_fixups);
   3010	binder_stats_created(BINDER_STAT_TRANSACTION);
   3011	spin_lock_init(&t->lock);
   3012
   3013	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
   3014	if (tcomplete == NULL) {
   3015		binder_txn_error("%d:%d cannot allocate work for transaction\n",
   3016			thread->pid, proc->pid);
   3017		return_error = BR_FAILED_REPLY;
   3018		return_error_param = -ENOMEM;
   3019		return_error_line = __LINE__;
   3020		goto err_alloc_tcomplete_failed;
   3021	}
   3022	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
   3023
   3024	t->debug_id = t_debug_id;
   3025
   3026	if (reply)
   3027		binder_debug(BINDER_DEBUG_TRANSACTION,
   3028			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
   3029			     proc->pid, thread->pid, t->debug_id,
   3030			     target_proc->pid, target_thread->pid,
   3031			     (u64)tr->data.ptr.buffer,
   3032			     (u64)tr->data.ptr.offsets,
   3033			     (u64)tr->data_size, (u64)tr->offsets_size,
   3034			     (u64)extra_buffers_size);
   3035	else
   3036		binder_debug(BINDER_DEBUG_TRANSACTION,
   3037			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
   3038			     proc->pid, thread->pid, t->debug_id,
   3039			     target_proc->pid, target_node->debug_id,
   3040			     (u64)tr->data.ptr.buffer,
   3041			     (u64)tr->data.ptr.offsets,
   3042			     (u64)tr->data_size, (u64)tr->offsets_size,
   3043			     (u64)extra_buffers_size);
   3044
   3045	if (!reply && !(tr->flags & TF_ONE_WAY))
   3046		t->from = thread;
   3047	else
   3048		t->from = NULL;
   3049	t->sender_euid = task_euid(proc->tsk);
   3050	t->to_proc = target_proc;
   3051	t->to_thread = target_thread;
   3052	t->code = tr->code;
   3053	t->flags = tr->flags;
   3054	t->priority = task_nice(current);
   3055
   3056	if (target_node && target_node->txn_security_ctx) {
   3057		u32 secid;
   3058		size_t added_size;
   3059
   3060		security_cred_getsecid(proc->cred, &secid);
   3061		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
   3062		if (ret) {
   3063			binder_txn_error("%d:%d failed to get security context\n",
   3064				thread->pid, proc->pid);
   3065			return_error = BR_FAILED_REPLY;
   3066			return_error_param = ret;
   3067			return_error_line = __LINE__;
   3068			goto err_get_secctx_failed;
   3069		}
   3070		added_size = ALIGN(secctx_sz, sizeof(u64));
   3071		extra_buffers_size += added_size;
   3072		if (extra_buffers_size < added_size) {
   3073			binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
   3074				thread->pid, proc->pid);
   3075			return_error = BR_FAILED_REPLY;
   3076			return_error_param = -EINVAL;
   3077			return_error_line = __LINE__;
   3078			goto err_bad_extra_size;
   3079		}
   3080	}
   3081
   3082	trace_binder_transaction(reply, t, target_node);
   3083
   3084	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
   3085		tr->offsets_size, extra_buffers_size,
   3086		!reply && (t->flags & TF_ONE_WAY), current->tgid);
   3087	if (IS_ERR(t->buffer)) {
   3088		char *s;
   3089
   3090		ret = PTR_ERR(t->buffer);
   3091		s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
   3092			: (ret == -ENOSPC) ? ": no space left"
   3093			: (ret == -ENOMEM) ? ": memory allocation failed"
   3094			: "";
   3095		binder_txn_error("cannot allocate buffer%s", s);
   3096
   3097		return_error_param = PTR_ERR(t->buffer);
   3098		return_error = return_error_param == -ESRCH ?
   3099			BR_DEAD_REPLY : BR_FAILED_REPLY;
   3100		return_error_line = __LINE__;
   3101		t->buffer = NULL;
   3102		goto err_binder_alloc_buf_failed;
   3103	}
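        	/*
        	 * Layout of the freshly allocated target buffer as it is
        	 * filled in below (a sketch; each region is aligned per the
        	 * offset arithmetic that follows):
        	 *
        	 *   [ data | offsets array | sg/extra buffers | secctx ]
        	 *
        	 * extra_buffers_size already accounts for the secctx tail.
        	 */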
   3104	if (secctx) {
   3105		int err;
   3106		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
   3107				    ALIGN(tr->offsets_size, sizeof(void *)) +
   3108				    ALIGN(extra_buffers_size, sizeof(void *)) -
   3109				    ALIGN(secctx_sz, sizeof(u64));
   3110
   3111		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
   3112		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
   3113						  t->buffer, buf_offset,
   3114						  secctx, secctx_sz);
   3115		if (err) {
   3116			t->security_ctx = 0;
   3117			WARN_ON(1);
   3118		}
   3119		security_release_secctx(secctx, secctx_sz);
   3120		secctx = NULL;
   3121	}
   3122	t->buffer->debug_id = t->debug_id;
   3123	t->buffer->transaction = t;
   3124	t->buffer->target_node = target_node;
   3125	t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
   3126	trace_binder_transaction_alloc_buf(t->buffer);
   3127
   3128	if (binder_alloc_copy_user_to_buffer(
   3129				&target_proc->alloc,
   3130				t->buffer,
   3131				ALIGN(tr->data_size, sizeof(void *)),
   3132				(const void __user *)
   3133					(uintptr_t)tr->data.ptr.offsets,
   3134				tr->offsets_size)) {
   3135		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
   3136				proc->pid, thread->pid);
   3137		return_error = BR_FAILED_REPLY;
   3138		return_error_param = -EFAULT;
   3139		return_error_line = __LINE__;
   3140		goto err_copy_data_failed;
   3141	}
   3142	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
   3143		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
   3144				proc->pid, thread->pid, (u64)tr->offsets_size);
   3145		return_error = BR_FAILED_REPLY;
   3146		return_error_param = -EINVAL;
   3147		return_error_line = __LINE__;
   3148		goto err_bad_offset;
   3149	}
   3150	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
   3151		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
   3152				  proc->pid, thread->pid,
   3153				  (u64)extra_buffers_size);
   3154		return_error = BR_FAILED_REPLY;
   3155		return_error_param = -EINVAL;
   3156		return_error_line = __LINE__;
   3157		goto err_bad_offset;
   3158	}
   3159	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
   3160	buffer_offset = off_start_offset;
   3161	off_end_offset = off_start_offset + tr->offsets_size;
   3162	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
   3163	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
   3164		ALIGN(secctx_sz, sizeof(u64));
   3165	off_min = 0;
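        	/*
        	 * Walk the offsets array: each entry is the offset of one
        	 * binder object embedded in the data. Plain data between
        	 * objects is copied verbatim from user space; the objects
        	 * themselves are translated for the target below.
        	 */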
   3166	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
   3167	     buffer_offset += sizeof(binder_size_t)) {
   3168		struct binder_object_header *hdr;
   3169		size_t object_size;
   3170		struct binder_object object;
   3171		binder_size_t object_offset;
   3172		binder_size_t copy_size;
   3173
   3174		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
   3175						  &object_offset,
   3176						  t->buffer,
   3177						  buffer_offset,
   3178						  sizeof(object_offset))) {
   3179			binder_txn_error("%d:%d copy offset from buffer failed\n",
   3180				thread->pid, proc->pid);
   3181			return_error = BR_FAILED_REPLY;
   3182			return_error_param = -EINVAL;
   3183			return_error_line = __LINE__;
   3184			goto err_bad_offset;
   3185		}
   3186
   3187		/*
   3188		 * Copy the source user buffer up to the next object
   3189		 * that will be processed.
   3190		 */
   3191		copy_size = object_offset - user_offset;
   3192		if (copy_size && (user_offset > object_offset ||
   3193				binder_alloc_copy_user_to_buffer(
   3194					&target_proc->alloc,
   3195					t->buffer, user_offset,
   3196					user_buffer + user_offset,
   3197					copy_size))) {
   3198			binder_user_error("%d:%d got transaction with invalid data ptr\n",
   3199					proc->pid, thread->pid);
   3200			return_error = BR_FAILED_REPLY;
   3201			return_error_param = -EFAULT;
   3202			return_error_line = __LINE__;
   3203			goto err_copy_data_failed;
   3204		}
   3205		object_size = binder_get_object(target_proc, user_buffer,
   3206				t->buffer, object_offset, &object);
   3207		if (object_size == 0 || object_offset < off_min) {
   3208			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
   3209					  proc->pid, thread->pid,
   3210					  (u64)object_offset,
   3211					  (u64)off_min,
   3212					  (u64)t->buffer->data_size);
   3213			return_error = BR_FAILED_REPLY;
   3214			return_error_param = -EINVAL;
   3215			return_error_line = __LINE__;
   3216			goto err_bad_offset;
   3217		}
   3218		/*
   3219		 * Set offset to the next buffer fragment to be
   3220		 * copied
   3221		 */
   3222		user_offset = object_offset + object_size;
   3223
   3224		hdr = &object.hdr;
   3225		off_min = object_offset + object_size;
   3226		switch (hdr->type) {
   3227		case BINDER_TYPE_BINDER:
   3228		case BINDER_TYPE_WEAK_BINDER: {
   3229			struct flat_binder_object *fp;
   3230
   3231			fp = to_flat_binder_object(hdr);
   3232			ret = binder_translate_binder(fp, t, thread);
   3233
   3234			if (ret < 0 ||
   3235			    binder_alloc_copy_to_buffer(&target_proc->alloc,
   3236							t->buffer,
   3237							object_offset,
   3238							fp, sizeof(*fp))) {
   3239				binder_txn_error("%d:%d translate binder failed\n",
   3240					thread->pid, proc->pid);
   3241				return_error = BR_FAILED_REPLY;
   3242				return_error_param = ret;
   3243				return_error_line = __LINE__;
   3244				goto err_translate_failed;
   3245			}
   3246		} break;
   3247		case BINDER_TYPE_HANDLE:
   3248		case BINDER_TYPE_WEAK_HANDLE: {
   3249			struct flat_binder_object *fp;
   3250
   3251			fp = to_flat_binder_object(hdr);
   3252			ret = binder_translate_handle(fp, t, thread);
   3253			if (ret < 0 ||
   3254			    binder_alloc_copy_to_buffer(&target_proc->alloc,
   3255							t->buffer,
   3256							object_offset,
   3257							fp, sizeof(*fp))) {
   3258				binder_txn_error("%d:%d translate handle failed\n",
   3259					thread->pid, proc->pid);
   3260				return_error = BR_FAILED_REPLY;
   3261				return_error_param = ret;
   3262				return_error_line = __LINE__;
   3263				goto err_translate_failed;
   3264			}
   3265		} break;
   3266
   3267		case BINDER_TYPE_FD: {
   3268			struct binder_fd_object *fp = to_binder_fd_object(hdr);
   3269			binder_size_t fd_offset = object_offset +
   3270				(uintptr_t)&fp->fd - (uintptr_t)fp;
   3271			int ret = binder_translate_fd(fp->fd, fd_offset, t,
   3272						      thread, in_reply_to);
   3273
   3274			fp->pad_binder = 0;
   3275			if (ret < 0 ||
   3276			    binder_alloc_copy_to_buffer(&target_proc->alloc,
   3277							t->buffer,
   3278							object_offset,
   3279							fp, sizeof(*fp))) {
   3280				binder_txn_error("%d:%d translate fd failed\n",
   3281					thread->pid, proc->pid);
   3282				return_error = BR_FAILED_REPLY;
   3283				return_error_param = ret;
   3284				return_error_line = __LINE__;
   3285				goto err_translate_failed;
   3286			}
   3287		} break;
   3288		case BINDER_TYPE_FDA: {
   3289			struct binder_object ptr_object;
   3290			binder_size_t parent_offset;
   3291			struct binder_object user_object;
   3292			size_t user_parent_size;
   3293			struct binder_fd_array_object *fda =
   3294				to_binder_fd_array_object(hdr);
   3295			size_t num_valid = (buffer_offset - off_start_offset) /
   3296						sizeof(binder_size_t);
   3297			struct binder_buffer_object *parent =
   3298				binder_validate_ptr(target_proc, t->buffer,
   3299						    &ptr_object, fda->parent,
   3300						    off_start_offset,
   3301						    &parent_offset,
   3302						    num_valid);
   3303			if (!parent) {
   3304				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
   3305						  proc->pid, thread->pid);
   3306				return_error = BR_FAILED_REPLY;
   3307				return_error_param = -EINVAL;
   3308				return_error_line = __LINE__;
   3309				goto err_bad_parent;
   3310			}
   3311			if (!binder_validate_fixup(target_proc, t->buffer,
   3312						   off_start_offset,
   3313						   parent_offset,
   3314						   fda->parent_offset,
   3315						   last_fixup_obj_off,
   3316						   last_fixup_min_off)) {
   3317				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
   3318						  proc->pid, thread->pid);
   3319				return_error = BR_FAILED_REPLY;
   3320				return_error_param = -EINVAL;
   3321				return_error_line = __LINE__;
   3322				goto err_bad_parent;
   3323			}
   3324			/*
   3325			 * We need to read the user version of the parent
   3326			 * object to get the original user offset
   3327			 */
   3328			user_parent_size =
   3329				binder_get_object(proc, user_buffer, t->buffer,
   3330						  parent_offset, &user_object);
   3331			if (user_parent_size != sizeof(user_object.bbo)) {
   3332				binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
   3333						  proc->pid, thread->pid,
   3334						  user_parent_size,
   3335						  sizeof(user_object.bbo));
   3336				return_error = BR_FAILED_REPLY;
   3337				return_error_param = -EINVAL;
   3338				return_error_line = __LINE__;
   3339				goto err_bad_parent;
   3340			}
   3341			ret = binder_translate_fd_array(&pf_head, fda,
   3342							user_buffer, parent,
   3343							&user_object.bbo, t,
   3344							thread, in_reply_to);
   3345			if (!ret)
   3346				ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
   3347								  t->buffer,
   3348								  object_offset,
   3349								  fda, sizeof(*fda));
   3350			if (ret) {
   3351				binder_txn_error("%d:%d translate fd array failed\n",
   3352					thread->pid, proc->pid);
   3353				return_error = BR_FAILED_REPLY;
   3354				return_error_param = ret > 0 ? -EINVAL : ret;
   3355				return_error_line = __LINE__;
   3356				goto err_translate_failed;
   3357			}
   3358			last_fixup_obj_off = parent_offset;
   3359			last_fixup_min_off =
   3360				fda->parent_offset + sizeof(u32) * fda->num_fds;
   3361		} break;
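        		/*
        		 * BINDER_TYPE_PTR describes a scatter-gather buffer.
        		 * Only sg space is reserved here and bp->buffer is
        		 * patched to the target's view of that space; the user
        		 * copy itself is deferred via binder_defer_copy() until
        		 * all fixups are known.
        		 */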
   3362		case BINDER_TYPE_PTR: {
   3363			struct binder_buffer_object *bp =
   3364				to_binder_buffer_object(hdr);
   3365			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
   3366			size_t num_valid;
   3367
   3368			if (bp->length > buf_left) {
   3369				binder_user_error("%d:%d got transaction with too large buffer\n",
   3370						  proc->pid, thread->pid);
   3371				return_error = BR_FAILED_REPLY;
   3372				return_error_param = -EINVAL;
   3373				return_error_line = __LINE__;
   3374				goto err_bad_offset;
   3375			}
   3376			ret = binder_defer_copy(&sgc_head, sg_buf_offset,
   3377				(const void __user *)(uintptr_t)bp->buffer,
   3378				bp->length);
   3379			if (ret) {
   3380				binder_txn_error("%d:%d deferred copy failed\n",
   3381					thread->pid, proc->pid);
   3382				return_error = BR_FAILED_REPLY;
   3383				return_error_param = ret;
   3384				return_error_line = __LINE__;
   3385				goto err_translate_failed;
   3386			}
   3387			/* Fixup buffer pointer to target proc address space */
   3388			bp->buffer = (uintptr_t)
   3389				t->buffer->user_data + sg_buf_offset;
   3390			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
   3391
   3392			num_valid = (buffer_offset - off_start_offset) /
   3393					sizeof(binder_size_t);
   3394			ret = binder_fixup_parent(&pf_head, t,
   3395						  thread, bp,
   3396						  off_start_offset,
   3397						  num_valid,
   3398						  last_fixup_obj_off,
   3399						  last_fixup_min_off);
   3400			if (ret < 0 ||
   3401			    binder_alloc_copy_to_buffer(&target_proc->alloc,
   3402							t->buffer,
   3403							object_offset,
   3404							bp, sizeof(*bp))) {
   3405				binder_txn_error("%d:%d failed to fixup parent\n",
   3406					thread->pid, proc->pid);
   3407				return_error = BR_FAILED_REPLY;
   3408				return_error_param = ret;
   3409				return_error_line = __LINE__;
   3410				goto err_translate_failed;
   3411			}
   3412			last_fixup_obj_off = object_offset;
   3413			last_fixup_min_off = 0;
   3414		} break;
   3415		default:
   3416			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
   3417				proc->pid, thread->pid, hdr->type);
   3418			return_error = BR_FAILED_REPLY;
   3419			return_error_param = -EINVAL;
   3420			return_error_line = __LINE__;
   3421			goto err_bad_object_type;
   3422		}
   3423	}
   3424	/* Done processing objects, copy the rest of the buffer */
   3425	if (binder_alloc_copy_user_to_buffer(
   3426				&target_proc->alloc,
   3427				t->buffer, user_offset,
   3428				user_buffer + user_offset,
   3429				tr->data_size - user_offset)) {
   3430		binder_user_error("%d:%d got transaction with invalid data ptr\n",
   3431				proc->pid, thread->pid);
   3432		return_error = BR_FAILED_REPLY;
   3433		return_error_param = -EFAULT;
   3434		return_error_line = __LINE__;
   3435		goto err_copy_data_failed;
   3436	}
   3437
   3438	ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
   3439					    &sgc_head, &pf_head);
   3440	if (ret) {
   3441		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
   3442				  proc->pid, thread->pid);
   3443		return_error = BR_FAILED_REPLY;
   3444		return_error_param = ret;
   3445		return_error_line = __LINE__;
   3446		goto err_copy_data_failed;
   3447	}
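        	/*
        	 * oneway_spam_suspect is set by the allocator when an async
        	 * sender is monopolizing the target's buffer space; it reaches
        	 * user space as BR_ONEWAY_SPAM_SUSPECT in binder_thread_read().
        	 */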
   3448	if (t->buffer->oneway_spam_suspect)
   3449		tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
   3450	else
   3451		tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
   3452	t->work.type = BINDER_WORK_TRANSACTION;
   3453
   3454	if (reply) {
   3455		binder_enqueue_thread_work(thread, tcomplete);
   3456		binder_inner_proc_lock(target_proc);
   3457		if (target_thread->is_dead) {
   3458			return_error = BR_DEAD_REPLY;
   3459			binder_inner_proc_unlock(target_proc);
   3460			goto err_dead_proc_or_thread;
   3461		}
   3462		BUG_ON(t->buffer->async_transaction != 0);
   3463		binder_pop_transaction_ilocked(target_thread, in_reply_to);
   3464		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
   3465		target_proc->outstanding_txns++;
   3466		binder_inner_proc_unlock(target_proc);
   3467		wake_up_interruptible_sync(&target_thread->wait);
   3468		binder_free_transaction(in_reply_to);
   3469	} else if (!(t->flags & TF_ONE_WAY)) {
   3470		BUG_ON(t->buffer->async_transaction != 0);
   3471		binder_inner_proc_lock(proc);
   3472		/*
   3473		 * Defer the TRANSACTION_COMPLETE, so we don't return to
   3474		 * userspace immediately; this allows the target process to
   3475		 * immediately start processing this transaction, reducing
   3476		 * latency. We will then return the TRANSACTION_COMPLETE when
   3477		 * the target replies (or there is an error).
   3478		 */
   3479		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
   3480		t->need_reply = 1;
   3481		t->from_parent = thread->transaction_stack;
   3482		thread->transaction_stack = t;
   3483		binder_inner_proc_unlock(proc);
   3484		return_error = binder_proc_transaction(t,
   3485				target_proc, target_thread);
   3486		if (return_error) {
   3487			binder_inner_proc_lock(proc);
   3488			binder_pop_transaction_ilocked(thread, t);
   3489			binder_inner_proc_unlock(proc);
   3490			goto err_dead_proc_or_thread;
   3491		}
   3492	} else {
   3493		BUG_ON(target_node == NULL);
   3494		BUG_ON(t->buffer->async_transaction != 1);
   3495		binder_enqueue_thread_work(thread, tcomplete);
   3496		return_error = binder_proc_transaction(t, target_proc, NULL);
   3497		if (return_error)
   3498			goto err_dead_proc_or_thread;
   3499	}
   3500	if (target_thread)
   3501		binder_thread_dec_tmpref(target_thread);
   3502	binder_proc_dec_tmpref(target_proc);
   3503	if (target_node)
   3504		binder_dec_node_tmpref(target_node);
   3505	/*
   3506	 * write barrier to synchronize with initialization
   3507	 * of log entry
   3508	 */
   3509	smp_wmb();
   3510	WRITE_ONCE(e->debug_id_done, t_debug_id);
   3511	return;
   3512
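        /*
         * Error unwind: each label below sits past the cleanup for state
         * that was not yet set up at its goto site, and falls through the
         * cleanup for everything that was.
         */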
   3513err_dead_proc_or_thread:
   3514	binder_txn_error("%d:%d dead process or thread\n",
   3515		thread->pid, proc->pid);
   3516	return_error_line = __LINE__;
   3517	binder_dequeue_work(proc, tcomplete);
   3518err_translate_failed:
   3519err_bad_object_type:
   3520err_bad_offset:
   3521err_bad_parent:
   3522err_copy_data_failed:
   3523	binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
   3524	binder_free_txn_fixups(t);
   3525	trace_binder_transaction_failed_buffer_release(t->buffer);
   3526	binder_transaction_buffer_release(target_proc, NULL, t->buffer,
   3527					  buffer_offset, true);
   3528	if (target_node)
   3529		binder_dec_node_tmpref(target_node);
   3530	target_node = NULL;
   3531	t->buffer->transaction = NULL;
   3532	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
   3533err_binder_alloc_buf_failed:
   3534err_bad_extra_size:
   3535	if (secctx)
   3536		security_release_secctx(secctx, secctx_sz);
   3537err_get_secctx_failed:
   3538	kfree(tcomplete);
   3539	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
   3540err_alloc_tcomplete_failed:
   3541	if (trace_binder_txn_latency_free_enabled())
   3542		binder_txn_latency_free(t);
   3543	kfree(t);
   3544	binder_stats_deleted(BINDER_STAT_TRANSACTION);
   3545err_alloc_t_failed:
   3546err_bad_todo_list:
   3547err_bad_call_stack:
   3548err_empty_call_stack:
   3549err_dead_binder:
   3550err_invalid_target_handle:
   3551	if (target_node) {
   3552		binder_dec_node(target_node, 1, 0);
   3553		binder_dec_node_tmpref(target_node);
   3554	}
   3555
   3556	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
   3557		     "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
   3558		     proc->pid, thread->pid, reply ? "reply" :
   3559		     (tr->flags & TF_ONE_WAY ? "async" : "call"),
   3560		     target_proc ? target_proc->pid : 0,
   3561		     target_thread ? target_thread->pid : 0,
   3562		     t_debug_id, return_error, return_error_param,
   3563		     (u64)tr->data_size, (u64)tr->offsets_size,
   3564		     return_error_line);
   3565
   3566	if (target_thread)
   3567		binder_thread_dec_tmpref(target_thread);
   3568	if (target_proc)
   3569		binder_proc_dec_tmpref(target_proc);
   3570
   3571	{
   3572		struct binder_transaction_log_entry *fe;
   3573
   3574		e->return_error = return_error;
   3575		e->return_error_param = return_error_param;
   3576		e->return_error_line = return_error_line;
   3577		fe = binder_transaction_log_add(&binder_transaction_log_failed);
   3578		*fe = *e;
   3579		/*
   3580		 * write barrier to synchronize with initialization
   3581		 * of log entry
   3582		 */
   3583		smp_wmb();
   3584		WRITE_ONCE(e->debug_id_done, t_debug_id);
   3585		WRITE_ONCE(fe->debug_id_done, t_debug_id);
   3586	}
   3587
   3588	BUG_ON(thread->return_error.cmd != BR_OK);
   3589	if (in_reply_to) {
   3590		binder_set_txn_from_error(in_reply_to, t_debug_id,
   3591				return_error, return_error_param);
   3592		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
   3593		binder_enqueue_thread_work(thread, &thread->return_error.work);
   3594		binder_send_failed_reply(in_reply_to, return_error);
   3595	} else {
   3596		binder_inner_proc_lock(proc);
   3597		binder_set_extended_error(&thread->ee, t_debug_id,
   3598				return_error, return_error_param);
   3599		binder_inner_proc_unlock(proc);
   3600		thread->return_error.cmd = return_error;
   3601		binder_enqueue_thread_work(thread, &thread->return_error.work);
   3602	}
   3603}
   3604
    3605/**
    3606 * binder_free_buf() - free the specified buffer
    3607 * @proc:	binder proc that owns the buffer
         * @thread:	thread performing the buffer release
    3608 * @buffer:	buffer to be freed
    3609 * @is_failure:	failed to send transaction
    3610 *
    3611 * If the buffer is for an async transaction, enqueue the next async
    3612 * transaction from the node.
    3613 *
    3614 * Clean up the buffer and free it.
    3615 */
   3616static void
   3617binder_free_buf(struct binder_proc *proc,
   3618		struct binder_thread *thread,
   3619		struct binder_buffer *buffer, bool is_failure)
   3620{
   3621	binder_inner_proc_lock(proc);
   3622	if (buffer->transaction) {
   3623		buffer->transaction->buffer = NULL;
   3624		buffer->transaction = NULL;
   3625	}
   3626	binder_inner_proc_unlock(proc);
   3627	if (buffer->async_transaction && buffer->target_node) {
   3628		struct binder_node *buf_node;
   3629		struct binder_work *w;
   3630
   3631		buf_node = buffer->target_node;
   3632		binder_node_inner_lock(buf_node);
   3633		BUG_ON(!buf_node->has_async_transaction);
   3634		BUG_ON(buf_node->proc != proc);
   3635		w = binder_dequeue_work_head_ilocked(
   3636				&buf_node->async_todo);
   3637		if (!w) {
   3638			buf_node->has_async_transaction = false;
   3639		} else {
   3640			binder_enqueue_work_ilocked(
   3641					w, &proc->todo);
   3642			binder_wakeup_proc_ilocked(proc);
   3643		}
   3644		binder_node_inner_unlock(buf_node);
   3645	}
   3646	trace_binder_transaction_buffer_release(buffer);
   3647	binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
   3648	binder_alloc_free_buf(&proc->alloc, buffer);
   3649}
   3650
   3651static int binder_thread_write(struct binder_proc *proc,
   3652			struct binder_thread *thread,
   3653			binder_uintptr_t binder_buffer, size_t size,
   3654			binder_size_t *consumed)
   3655{
   3656	uint32_t cmd;
   3657	struct binder_context *context = proc->context;
   3658	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
   3659	void __user *ptr = buffer + *consumed;
   3660	void __user *end = buffer + size;
   3661
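        	/*
        	 * The write buffer is a packed stream: a 32-bit BC_* code
        	 * followed immediately by that command's payload, repeated
        	 * until the whole buffer is consumed (or an error is pending).
        	 */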
   3662	while (ptr < end && thread->return_error.cmd == BR_OK) {
   3663		int ret;
   3664
   3665		if (get_user(cmd, (uint32_t __user *)ptr))
   3666			return -EFAULT;
   3667		ptr += sizeof(uint32_t);
   3668		trace_binder_command(cmd);
   3669		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
   3670			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
   3671			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
   3672			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
   3673		}
   3674		switch (cmd) {
   3675		case BC_INCREFS:
   3676		case BC_ACQUIRE:
   3677		case BC_RELEASE:
   3678		case BC_DECREFS: {
   3679			uint32_t target;
   3680			const char *debug_string;
   3681			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
   3682			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
   3683			struct binder_ref_data rdata;
   3684
   3685			if (get_user(target, (uint32_t __user *)ptr))
   3686				return -EFAULT;
   3687
   3688			ptr += sizeof(uint32_t);
   3689			ret = -1;
   3690			if (increment && !target) {
   3691				struct binder_node *ctx_mgr_node;
   3692
   3693				mutex_lock(&context->context_mgr_node_lock);
   3694				ctx_mgr_node = context->binder_context_mgr_node;
   3695				if (ctx_mgr_node) {
   3696					if (ctx_mgr_node->proc == proc) {
   3697						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
   3698								  proc->pid, thread->pid);
   3699						mutex_unlock(&context->context_mgr_node_lock);
   3700						return -EINVAL;
   3701					}
   3702					ret = binder_inc_ref_for_node(
   3703							proc, ctx_mgr_node,
   3704							strong, NULL, &rdata);
   3705				}
   3706				mutex_unlock(&context->context_mgr_node_lock);
   3707			}
   3708			if (ret)
   3709				ret = binder_update_ref_for_handle(
   3710						proc, target, increment, strong,
   3711						&rdata);
   3712			if (!ret && rdata.desc != target) {
   3713				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
   3714					proc->pid, thread->pid,
   3715					target, rdata.desc);
   3716			}
   3717			switch (cmd) {
   3718			case BC_INCREFS:
   3719				debug_string = "IncRefs";
   3720				break;
   3721			case BC_ACQUIRE:
   3722				debug_string = "Acquire";
   3723				break;
   3724			case BC_RELEASE:
   3725				debug_string = "Release";
   3726				break;
   3727			case BC_DECREFS:
   3728			default:
   3729				debug_string = "DecRefs";
   3730				break;
   3731			}
   3732			if (ret) {
   3733				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
   3734					proc->pid, thread->pid, debug_string,
   3735					strong, target, ret);
   3736				break;
   3737			}
   3738			binder_debug(BINDER_DEBUG_USER_REFS,
   3739				     "%d:%d %s ref %d desc %d s %d w %d\n",
   3740				     proc->pid, thread->pid, debug_string,
   3741				     rdata.debug_id, rdata.desc, rdata.strong,
   3742				     rdata.weak);
   3743			break;
   3744		}
   3745		case BC_INCREFS_DONE:
   3746		case BC_ACQUIRE_DONE: {
   3747			binder_uintptr_t node_ptr;
   3748			binder_uintptr_t cookie;
   3749			struct binder_node *node;
   3750			bool free_node;
   3751
   3752			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
   3753				return -EFAULT;
   3754			ptr += sizeof(binder_uintptr_t);
   3755			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
   3756				return -EFAULT;
   3757			ptr += sizeof(binder_uintptr_t);
   3758			node = binder_get_node(proc, node_ptr);
   3759			if (node == NULL) {
   3760				binder_user_error("%d:%d %s u%016llx no match\n",
   3761					proc->pid, thread->pid,
   3762					cmd == BC_INCREFS_DONE ?
   3763					"BC_INCREFS_DONE" :
   3764					"BC_ACQUIRE_DONE",
   3765					(u64)node_ptr);
   3766				break;
   3767			}
   3768			if (cookie != node->cookie) {
   3769				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
   3770					proc->pid, thread->pid,
   3771					cmd == BC_INCREFS_DONE ?
   3772					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
   3773					(u64)node_ptr, node->debug_id,
   3774					(u64)cookie, (u64)node->cookie);
   3775				binder_put_node(node);
   3776				break;
   3777			}
   3778			binder_node_inner_lock(node);
   3779			if (cmd == BC_ACQUIRE_DONE) {
   3780				if (node->pending_strong_ref == 0) {
   3781					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
   3782						proc->pid, thread->pid,
   3783						node->debug_id);
   3784					binder_node_inner_unlock(node);
   3785					binder_put_node(node);
   3786					break;
   3787				}
   3788				node->pending_strong_ref = 0;
   3789			} else {
   3790				if (node->pending_weak_ref == 0) {
   3791					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
   3792						proc->pid, thread->pid,
   3793						node->debug_id);
   3794					binder_node_inner_unlock(node);
   3795					binder_put_node(node);
   3796					break;
   3797				}
   3798				node->pending_weak_ref = 0;
   3799			}
   3800			free_node = binder_dec_node_nilocked(node,
   3801					cmd == BC_ACQUIRE_DONE, 0);
   3802			WARN_ON(free_node);
   3803			binder_debug(BINDER_DEBUG_USER_REFS,
   3804				     "%d:%d %s node %d ls %d lw %d tr %d\n",
   3805				     proc->pid, thread->pid,
   3806				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
   3807				     node->debug_id, node->local_strong_refs,
   3808				     node->local_weak_refs, node->tmp_refs);
   3809			binder_node_inner_unlock(node);
   3810			binder_put_node(node);
   3811			break;
   3812		}
   3813		case BC_ATTEMPT_ACQUIRE:
   3814			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
   3815			return -EINVAL;
   3816		case BC_ACQUIRE_RESULT:
   3817			pr_err("BC_ACQUIRE_RESULT not supported\n");
   3818			return -EINVAL;
   3819
   3820		case BC_FREE_BUFFER: {
   3821			binder_uintptr_t data_ptr;
   3822			struct binder_buffer *buffer;
   3823
   3824			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
   3825				return -EFAULT;
   3826			ptr += sizeof(binder_uintptr_t);
   3827
   3828			buffer = binder_alloc_prepare_to_free(&proc->alloc,
   3829							      data_ptr);
   3830			if (IS_ERR_OR_NULL(buffer)) {
   3831				if (PTR_ERR(buffer) == -EPERM) {
   3832					binder_user_error(
   3833						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
   3834						proc->pid, thread->pid,
   3835						(u64)data_ptr);
   3836				} else {
   3837					binder_user_error(
   3838						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
   3839						proc->pid, thread->pid,
   3840						(u64)data_ptr);
   3841				}
   3842				break;
   3843			}
   3844			binder_debug(BINDER_DEBUG_FREE_BUFFER,
   3845				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
   3846				     proc->pid, thread->pid, (u64)data_ptr,
   3847				     buffer->debug_id,
   3848				     buffer->transaction ? "active" : "finished");
   3849			binder_free_buf(proc, thread, buffer, false);
   3850			break;
   3851		}
   3852
   3853		case BC_TRANSACTION_SG:
   3854		case BC_REPLY_SG: {
   3855			struct binder_transaction_data_sg tr;
   3856
   3857			if (copy_from_user(&tr, ptr, sizeof(tr)))
   3858				return -EFAULT;
   3859			ptr += sizeof(tr);
   3860			binder_transaction(proc, thread, &tr.transaction_data,
   3861					   cmd == BC_REPLY_SG, tr.buffers_size);
   3862			break;
   3863		}
   3864		case BC_TRANSACTION:
   3865		case BC_REPLY: {
   3866			struct binder_transaction_data tr;
   3867
   3868			if (copy_from_user(&tr, ptr, sizeof(tr)))
   3869				return -EFAULT;
   3870			ptr += sizeof(tr);
   3871			binder_transaction(proc, thread, &tr,
   3872					   cmd == BC_REPLY, 0);
   3873			break;
   3874		}
   3875
   3876		case BC_REGISTER_LOOPER:
   3877			binder_debug(BINDER_DEBUG_THREADS,
   3878				     "%d:%d BC_REGISTER_LOOPER\n",
   3879				     proc->pid, thread->pid);
   3880			binder_inner_proc_lock(proc);
   3881			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
   3882				thread->looper |= BINDER_LOOPER_STATE_INVALID;
   3883				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
   3884					proc->pid, thread->pid);
   3885			} else if (proc->requested_threads == 0) {
   3886				thread->looper |= BINDER_LOOPER_STATE_INVALID;
   3887				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
   3888					proc->pid, thread->pid);
   3889			} else {
   3890				proc->requested_threads--;
   3891				proc->requested_threads_started++;
   3892			}
   3893			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
   3894			binder_inner_proc_unlock(proc);
   3895			break;
   3896		case BC_ENTER_LOOPER:
   3897			binder_debug(BINDER_DEBUG_THREADS,
   3898				     "%d:%d BC_ENTER_LOOPER\n",
   3899				     proc->pid, thread->pid);
   3900			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
   3901				thread->looper |= BINDER_LOOPER_STATE_INVALID;
   3902				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
   3903					proc->pid, thread->pid);
   3904			}
   3905			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
   3906			break;
   3907		case BC_EXIT_LOOPER:
   3908			binder_debug(BINDER_DEBUG_THREADS,
   3909				     "%d:%d BC_EXIT_LOOPER\n",
   3910				     proc->pid, thread->pid);
   3911			thread->looper |= BINDER_LOOPER_STATE_EXITED;
   3912			break;
   3913
   3914		case BC_REQUEST_DEATH_NOTIFICATION:
   3915		case BC_CLEAR_DEATH_NOTIFICATION: {
   3916			uint32_t target;
   3917			binder_uintptr_t cookie;
   3918			struct binder_ref *ref;
   3919			struct binder_ref_death *death = NULL;
   3920
   3921			if (get_user(target, (uint32_t __user *)ptr))
   3922				return -EFAULT;
   3923			ptr += sizeof(uint32_t);
   3924			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
   3925				return -EFAULT;
   3926			ptr += sizeof(binder_uintptr_t);
   3927			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
   3928				/*
   3929				 * Allocate memory for death notification
   3930				 * before taking lock
   3931				 */
   3932				death = kzalloc(sizeof(*death), GFP_KERNEL);
   3933				if (death == NULL) {
   3934					WARN_ON(thread->return_error.cmd !=
   3935						BR_OK);
   3936					thread->return_error.cmd = BR_ERROR;
   3937					binder_enqueue_thread_work(
   3938						thread,
   3939						&thread->return_error.work);
   3940					binder_debug(
   3941						BINDER_DEBUG_FAILED_TRANSACTION,
   3942						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
   3943						proc->pid, thread->pid);
   3944					break;
   3945				}
   3946			}
   3947			binder_proc_lock(proc);
   3948			ref = binder_get_ref_olocked(proc, target, false);
   3949			if (ref == NULL) {
   3950				binder_user_error("%d:%d %s invalid ref %d\n",
   3951					proc->pid, thread->pid,
   3952					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
   3953					"BC_REQUEST_DEATH_NOTIFICATION" :
   3954					"BC_CLEAR_DEATH_NOTIFICATION",
   3955					target);
   3956				binder_proc_unlock(proc);
   3957				kfree(death);
   3958				break;
   3959			}
   3960
   3961			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
   3962				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
   3963				     proc->pid, thread->pid,
   3964				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
   3965				     "BC_REQUEST_DEATH_NOTIFICATION" :
   3966				     "BC_CLEAR_DEATH_NOTIFICATION",
   3967				     (u64)cookie, ref->data.debug_id,
   3968				     ref->data.desc, ref->data.strong,
   3969				     ref->data.weak, ref->node->debug_id);
   3970
   3971			binder_node_lock(ref->node);
   3972			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
   3973				if (ref->death) {
   3974					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
   3975						proc->pid, thread->pid);
   3976					binder_node_unlock(ref->node);
   3977					binder_proc_unlock(proc);
   3978					kfree(death);
   3979					break;
   3980				}
   3981				binder_stats_created(BINDER_STAT_DEATH);
   3982				INIT_LIST_HEAD(&death->work.entry);
   3983				death->cookie = cookie;
   3984				ref->death = death;
   3985				if (ref->node->proc == NULL) {
   3986					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
   3987
   3988					binder_inner_proc_lock(proc);
   3989					binder_enqueue_work_ilocked(
   3990						&ref->death->work, &proc->todo);
   3991					binder_wakeup_proc_ilocked(proc);
   3992					binder_inner_proc_unlock(proc);
   3993				}
   3994			} else {
   3995				if (ref->death == NULL) {
   3996					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
   3997						proc->pid, thread->pid);
   3998					binder_node_unlock(ref->node);
   3999					binder_proc_unlock(proc);
   4000					break;
   4001				}
   4002				death = ref->death;
   4003				if (death->cookie != cookie) {
   4004					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
   4005						proc->pid, thread->pid,
   4006						(u64)death->cookie,
   4007						(u64)cookie);
   4008					binder_node_unlock(ref->node);
   4009					binder_proc_unlock(proc);
   4010					break;
   4011				}
   4012				ref->death = NULL;
   4013				binder_inner_proc_lock(proc);
   4014				if (list_empty(&death->work.entry)) {
   4015					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
   4016					if (thread->looper &
   4017					    (BINDER_LOOPER_STATE_REGISTERED |
   4018					     BINDER_LOOPER_STATE_ENTERED))
   4019						binder_enqueue_thread_work_ilocked(
   4020								thread,
   4021								&death->work);
   4022					else {
   4023						binder_enqueue_work_ilocked(
   4024								&death->work,
   4025								&proc->todo);
   4026						binder_wakeup_proc_ilocked(
   4027								proc);
   4028					}
   4029				} else {
   4030					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
   4031					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
   4032				}
   4033				binder_inner_proc_unlock(proc);
   4034			}
   4035			binder_node_unlock(ref->node);
   4036			binder_proc_unlock(proc);
   4037		} break;
   4038		case BC_DEAD_BINDER_DONE: {
   4039			struct binder_work *w;
   4040			binder_uintptr_t cookie;
   4041			struct binder_ref_death *death = NULL;
   4042
   4043			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
   4044				return -EFAULT;
   4045
   4046			ptr += sizeof(cookie);
   4047			binder_inner_proc_lock(proc);
   4048			list_for_each_entry(w, &proc->delivered_death,
   4049					    entry) {
   4050				struct binder_ref_death *tmp_death =
   4051					container_of(w,
   4052						     struct binder_ref_death,
   4053						     work);
   4054
   4055				if (tmp_death->cookie == cookie) {
   4056					death = tmp_death;
   4057					break;
   4058				}
   4059			}
   4060			binder_debug(BINDER_DEBUG_DEAD_BINDER,
   4061				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
   4062				     proc->pid, thread->pid, (u64)cookie,
   4063				     death);
   4064			if (death == NULL) {
   4065				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
   4066					proc->pid, thread->pid, (u64)cookie);
   4067				binder_inner_proc_unlock(proc);
   4068				break;
   4069			}
   4070			binder_dequeue_work_ilocked(&death->work);
   4071			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
   4072				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
   4073				if (thread->looper &
   4074					(BINDER_LOOPER_STATE_REGISTERED |
   4075					 BINDER_LOOPER_STATE_ENTERED))
   4076					binder_enqueue_thread_work_ilocked(
   4077						thread, &death->work);
   4078				else {
   4079					binder_enqueue_work_ilocked(
   4080							&death->work,
   4081							&proc->todo);
   4082					binder_wakeup_proc_ilocked(proc);
   4083				}
   4084			}
   4085			binder_inner_proc_unlock(proc);
   4086		} break;
   4087
   4088		default:
   4089			pr_err("%d:%d unknown command %u\n",
   4090			       proc->pid, thread->pid, cmd);
   4091			return -EINVAL;
   4092		}
   4093		*consumed = ptr - buffer;
   4094	}
   4095	return 0;
   4096}
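
        /*
         * Example (a sketch, not kernel code): the BC_* stream parsed by
         * binder_thread_write() is produced by user space roughly as
         * follows, using only uapi definitions. "fd", "handle", "data"
         * and "len" are placeholders; real clients normally go through
         * libbinder rather than raw ioctls.
         *
         *	uint32_t cmd = BC_TRANSACTION;
         *	struct binder_transaction_data txn = {
         *		.target.handle = handle,	// from a ref we hold
         *		.code = 0,			// app-defined method
         *		.data_size = len,
         *		.data.ptr.buffer = (binder_uintptr_t)data,
         *	};
         *	unsigned char wbuf[sizeof(cmd) + sizeof(txn)];
         *	struct binder_write_read bwr = {
         *		.write_size = sizeof(wbuf),
         *		.write_buffer = (binder_uintptr_t)wbuf,
         *	};
         *
         *	memcpy(wbuf, &cmd, sizeof(cmd));
         *	memcpy(wbuf + sizeof(cmd), &txn, sizeof(txn));
         *	ioctl(fd, BINDER_WRITE_READ, &bwr);
         */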
   4097
   4098static void binder_stat_br(struct binder_proc *proc,
   4099			   struct binder_thread *thread, uint32_t cmd)
   4100{
   4101	trace_binder_return(cmd);
   4102	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
   4103		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
   4104		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
   4105		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
   4106	}
   4107}
   4108
   4109static int binder_put_node_cmd(struct binder_proc *proc,
   4110			       struct binder_thread *thread,
   4111			       void __user **ptrp,
   4112			       binder_uintptr_t node_ptr,
   4113			       binder_uintptr_t node_cookie,
   4114			       int node_debug_id,
   4115			       uint32_t cmd, const char *cmd_name)
   4116{
   4117	void __user *ptr = *ptrp;
   4118
   4119	if (put_user(cmd, (uint32_t __user *)ptr))
   4120		return -EFAULT;
   4121	ptr += sizeof(uint32_t);
   4122
   4123	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
   4124		return -EFAULT;
   4125	ptr += sizeof(binder_uintptr_t);
   4126
   4127	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
   4128		return -EFAULT;
   4129	ptr += sizeof(binder_uintptr_t);
   4130
   4131	binder_stat_br(proc, thread, cmd);
   4132	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
   4133		     proc->pid, thread->pid, cmd_name, node_debug_id,
   4134		     (u64)node_ptr, (u64)node_cookie);
   4135
   4136	*ptrp = ptr;
   4137	return 0;
   4138}
   4139
   4140static int binder_wait_for_work(struct binder_thread *thread,
   4141				bool do_proc_work)
   4142{
   4143	DEFINE_WAIT(wait);
   4144	struct binder_proc *proc = thread->proc;
   4145	int ret = 0;
   4146
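        	/* a thread sleeping here must not hold up the freezer */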
   4147	freezer_do_not_count();
   4148	binder_inner_proc_lock(proc);
   4149	for (;;) {
   4150		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
   4151		if (binder_has_work_ilocked(thread, do_proc_work))
   4152			break;
   4153		if (do_proc_work)
   4154			list_add(&thread->waiting_thread_node,
   4155				 &proc->waiting_threads);
   4156		binder_inner_proc_unlock(proc);
   4157		schedule();
   4158		binder_inner_proc_lock(proc);
   4159		list_del_init(&thread->waiting_thread_node);
   4160		if (signal_pending(current)) {
   4161			ret = -EINTR;
   4162			break;
   4163		}
   4164	}
   4165	finish_wait(&thread->wait, &wait);
   4166	binder_inner_proc_unlock(proc);
   4167	freezer_count();
   4168
   4169	return ret;
   4170}
   4171
   4172/**
   4173 * binder_apply_fd_fixups() - finish fd translation
    4174 * @proc:	binder_proc associated with @t->buffer
   4175 * @t:	binder transaction with list of fd fixups
   4176 *
   4177 * Now that we are in the context of the transaction target
   4178 * process, we can allocate and install fds. Process the
   4179 * list of fds to translate and fixup the buffer with the
   4180 * new fds first and only then install the files.
   4181 *
   4182 * If we fail to allocate an fd, skip the install and release
   4183 * any fds that have already been allocated.
   4184 */
   4185static int binder_apply_fd_fixups(struct binder_proc *proc,
   4186				  struct binder_transaction *t)
   4187{
   4188	struct binder_txn_fd_fixup *fixup, *tmp;
   4189	int ret = 0;
   4190
   4191	list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
   4192		int fd = get_unused_fd_flags(O_CLOEXEC);
   4193
   4194		if (fd < 0) {
   4195			binder_debug(BINDER_DEBUG_TRANSACTION,
   4196				     "failed fd fixup txn %d fd %d\n",
   4197				     t->debug_id, fd);
   4198			ret = -ENOMEM;
   4199			goto err;
   4200		}
   4201		binder_debug(BINDER_DEBUG_TRANSACTION,
   4202			     "fd fixup txn %d fd %d\n",
   4203			     t->debug_id, fd);
   4204		trace_binder_transaction_fd_recv(t, fd, fixup->offset);
   4205		fixup->target_fd = fd;
   4206		if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
   4207						fixup->offset, &fd,
   4208						sizeof(u32))) {
   4209			ret = -EINVAL;
   4210			goto err;
   4211		}
   4212	}
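        	/*
        	 * Every fd was reserved and patched into the buffer, so the
        	 * files can now be published; fd_install() cannot fail and
        	 * cannot be rolled back.
        	 */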
   4213	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
   4214		fd_install(fixup->target_fd, fixup->file);
   4215		list_del(&fixup->fixup_entry);
   4216		kfree(fixup);
   4217	}
   4218
   4219	return ret;
   4220
   4221err:
   4222	binder_free_txn_fixups(t);
   4223	return ret;
   4224}
   4225
   4226static int binder_thread_read(struct binder_proc *proc,
   4227			      struct binder_thread *thread,
   4228			      binder_uintptr_t binder_buffer, size_t size,
   4229			      binder_size_t *consumed, int non_block)
   4230{
   4231	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
   4232	void __user *ptr = buffer + *consumed;
   4233	void __user *end = buffer + size;
   4234
   4235	int ret = 0;
   4236	int wait_for_proc_work;
   4237
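        	/*
        	 * Every fresh read begins with BR_NOOP, so user space always
        	 * finds at least one well-formed command in the buffer.
        	 */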
   4238	if (*consumed == 0) {
   4239		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
   4240			return -EFAULT;
   4241		ptr += sizeof(uint32_t);
   4242	}
   4243
   4244retry:
   4245	binder_inner_proc_lock(proc);
   4246	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
   4247	binder_inner_proc_unlock(proc);
   4248
   4249	thread->looper |= BINDER_LOOPER_STATE_WAITING;
   4250
   4251	trace_binder_wait_for_work(wait_for_proc_work,
   4252				   !!thread->transaction_stack,
   4253				   !binder_worklist_empty(proc, &thread->todo));
   4254	if (wait_for_proc_work) {
   4255		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
   4256					BINDER_LOOPER_STATE_ENTERED))) {
   4257			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
   4258				proc->pid, thread->pid, thread->looper);
   4259			wait_event_interruptible(binder_user_error_wait,
   4260						 binder_stop_on_user_error < 2);
   4261		}
   4262		binder_set_nice(proc->default_priority);
   4263	}
   4264
   4265	if (non_block) {
   4266		if (!binder_has_work(thread, wait_for_proc_work))
   4267			ret = -EAGAIN;
   4268	} else {
   4269		ret = binder_wait_for_work(thread, wait_for_proc_work);
   4270	}
   4271
   4272	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
   4273
   4274	if (ret)
   4275		return ret;
   4276
   4277	while (1) {
   4278		uint32_t cmd;
   4279		struct binder_transaction_data_secctx tr;
   4280		struct binder_transaction_data *trd = &tr.transaction_data;
   4281		struct binder_work *w = NULL;
   4282		struct list_head *list = NULL;
   4283		struct binder_transaction *t = NULL;
   4284		struct binder_thread *t_from;
   4285		size_t trsize = sizeof(*trd);
   4286
   4287		binder_inner_proc_lock(proc);
   4288		if (!binder_worklist_empty_ilocked(&thread->todo))
   4289			list = &thread->todo;
   4290		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
   4291			   wait_for_proc_work)
   4292			list = &proc->todo;
   4293		else {
   4294			binder_inner_proc_unlock(proc);
   4295
    4296			/* no data added: only the BR_NOOP written above */
   4297			if (ptr - buffer == 4 && !thread->looper_need_return)
   4298				goto retry;
   4299			break;
   4300		}
   4301
   4302		if (end - ptr < sizeof(tr) + 4) {
   4303			binder_inner_proc_unlock(proc);
   4304			break;
   4305		}
   4306		w = binder_dequeue_work_head_ilocked(list);
   4307		if (binder_worklist_empty_ilocked(&thread->todo))
   4308			thread->process_todo = false;
   4309
   4310		switch (w->type) {
   4311		case BINDER_WORK_TRANSACTION: {
   4312			binder_inner_proc_unlock(proc);
   4313			t = container_of(w, struct binder_transaction, work);
   4314		} break;
   4315		case BINDER_WORK_RETURN_ERROR: {
   4316			struct binder_error *e = container_of(
   4317					w, struct binder_error, work);
   4318
   4319			WARN_ON(e->cmd == BR_OK);
   4320			binder_inner_proc_unlock(proc);
   4321			if (put_user(e->cmd, (uint32_t __user *)ptr))
   4322				return -EFAULT;
   4323			cmd = e->cmd;
   4324			e->cmd = BR_OK;
   4325			ptr += sizeof(uint32_t);
   4326
   4327			binder_stat_br(proc, thread, cmd);
   4328		} break;
   4329		case BINDER_WORK_TRANSACTION_COMPLETE:
   4330		case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
   4331			if (proc->oneway_spam_detection_enabled &&
   4332				   w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
   4333				cmd = BR_ONEWAY_SPAM_SUSPECT;
   4334			else
   4335				cmd = BR_TRANSACTION_COMPLETE;
   4336			binder_inner_proc_unlock(proc);
   4337			kfree(w);
   4338			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
   4339			if (put_user(cmd, (uint32_t __user *)ptr))
   4340				return -EFAULT;
   4341			ptr += sizeof(uint32_t);
   4342
   4343			binder_stat_br(proc, thread, cmd);
   4344			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
   4345				     "%d:%d BR_TRANSACTION_COMPLETE\n",
   4346				     proc->pid, thread->pid);
   4347		} break;
   4348		case BINDER_WORK_NODE: {
   4349			struct binder_node *node = container_of(w, struct binder_node, work);
   4350			int strong, weak;
   4351			binder_uintptr_t node_ptr = node->ptr;
   4352			binder_uintptr_t node_cookie = node->cookie;
   4353			int node_debug_id = node->debug_id;
   4354			int has_weak_ref;
   4355			int has_strong_ref;
   4356			void __user *orig_ptr = ptr;
   4357
   4358			BUG_ON(proc != node->proc);
   4359			strong = node->internal_strong_refs ||
   4360					node->local_strong_refs;
   4361			weak = !hlist_empty(&node->refs) ||
   4362					node->local_weak_refs ||
   4363					node->tmp_refs || strong;
   4364			has_strong_ref = node->has_strong_ref;
   4365			has_weak_ref = node->has_weak_ref;
   4366
   4367			if (weak && !has_weak_ref) {
   4368				node->has_weak_ref = 1;
   4369				node->pending_weak_ref = 1;
   4370				node->local_weak_refs++;
   4371			}
   4372			if (strong && !has_strong_ref) {
   4373				node->has_strong_ref = 1;
   4374				node->pending_strong_ref = 1;
   4375				node->local_strong_refs++;
   4376			}
   4377			if (!strong && has_strong_ref)
   4378				node->has_strong_ref = 0;
   4379			if (!weak && has_weak_ref)
   4380				node->has_weak_ref = 0;
   4381			if (!weak && !strong) {
   4382				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
   4383					     "%d:%d node %d u%016llx c%016llx deleted\n",
   4384					     proc->pid, thread->pid,
   4385					     node_debug_id,
   4386					     (u64)node_ptr,
   4387					     (u64)node_cookie);
   4388				rb_erase(&node->rb_node, &proc->nodes);
   4389				binder_inner_proc_unlock(proc);
   4390				binder_node_lock(node);
   4391				/*
   4392				 * Acquire the node lock before freeing the
   4393				 * node to serialize with other threads that
   4394				 * may have been holding the node lock while
   4395				 * decrementing this node (avoids race where
   4396				 * this thread frees while the other thread
   4397				 * is unlocking the node after the final
   4398				 * decrement)
   4399				 */
   4400				binder_node_unlock(node);
   4401				binder_free_node(node);
   4402			} else
   4403				binder_inner_proc_unlock(proc);
   4404
   4405			if (weak && !has_weak_ref)
   4406				ret = binder_put_node_cmd(
   4407						proc, thread, &ptr, node_ptr,
   4408						node_cookie, node_debug_id,
   4409						BR_INCREFS, "BR_INCREFS");
   4410			if (!ret && strong && !has_strong_ref)
   4411				ret = binder_put_node_cmd(
   4412						proc, thread, &ptr, node_ptr,
   4413						node_cookie, node_debug_id,
   4414						BR_ACQUIRE, "BR_ACQUIRE");
   4415			if (!ret && !strong && has_strong_ref)
   4416				ret = binder_put_node_cmd(
   4417						proc, thread, &ptr, node_ptr,
   4418						node_cookie, node_debug_id,
   4419						BR_RELEASE, "BR_RELEASE");
   4420			if (!ret && !weak && has_weak_ref)
   4421				ret = binder_put_node_cmd(
   4422						proc, thread, &ptr, node_ptr,
   4423						node_cookie, node_debug_id,
   4424						BR_DECREFS, "BR_DECREFS");
   4425			if (orig_ptr == ptr)
   4426				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
   4427					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
   4428					     proc->pid, thread->pid,
   4429					     node_debug_id,
   4430					     (u64)node_ptr,
   4431					     (u64)node_cookie);
   4432			if (ret)
   4433				return ret;
   4434		} break;
   4435		case BINDER_WORK_DEAD_BINDER:
   4436		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
   4437		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
   4438			struct binder_ref_death *death;
   4439			uint32_t cmd;
   4440			binder_uintptr_t cookie;
   4441
   4442			death = container_of(w, struct binder_ref_death, work);
   4443			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
   4444				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
   4445			else
   4446				cmd = BR_DEAD_BINDER;
   4447			cookie = death->cookie;
   4448
   4449			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
   4450				     "%d:%d %s %016llx\n",
   4451				      proc->pid, thread->pid,
   4452				      cmd == BR_DEAD_BINDER ?
   4453				      "BR_DEAD_BINDER" :
   4454				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
   4455				      (u64)cookie);
   4456			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
   4457				binder_inner_proc_unlock(proc);
   4458				kfree(death);
   4459				binder_stats_deleted(BINDER_STAT_DEATH);
   4460			} else {
   4461				binder_enqueue_work_ilocked(
   4462						w, &proc->delivered_death);
   4463				binder_inner_proc_unlock(proc);
   4464			}
   4465			if (put_user(cmd, (uint32_t __user *)ptr))
   4466				return -EFAULT;
   4467			ptr += sizeof(uint32_t);
   4468			if (put_user(cookie,
   4469				     (binder_uintptr_t __user *)ptr))
   4470				return -EFAULT;
   4471			ptr += sizeof(binder_uintptr_t);
   4472			binder_stat_br(proc, thread, cmd);
   4473			if (cmd == BR_DEAD_BINDER)
   4474				goto done; /* DEAD_BINDER notifications can cause transactions */
   4475		} break;
   4476		default:
   4477			binder_inner_proc_unlock(proc);
   4478			pr_err("%d:%d: bad work type %d\n",
   4479			       proc->pid, thread->pid, w->type);
   4480			break;
   4481		}
   4482
   4483		if (!t)
   4484			continue;
   4485
   4486		BUG_ON(t->buffer == NULL);
   4487		if (t->buffer->target_node) {
   4488			struct binder_node *target_node = t->buffer->target_node;
   4489
   4490			trd->target.ptr = target_node->ptr;
   4491			trd->cookie =  target_node->cookie;
   4492			t->saved_priority = task_nice(current);
   4493			if (t->priority < target_node->min_priority &&
   4494			    !(t->flags & TF_ONE_WAY))
   4495				binder_set_nice(t->priority);
   4496			else if (!(t->flags & TF_ONE_WAY) ||
   4497				 t->saved_priority > target_node->min_priority)
   4498				binder_set_nice(target_node->min_priority);
   4499			cmd = BR_TRANSACTION;
   4500		} else {
   4501			trd->target.ptr = 0;
   4502			trd->cookie = 0;
   4503			cmd = BR_REPLY;
   4504		}
   4505		trd->code = t->code;
   4506		trd->flags = t->flags;
   4507		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
   4508
   4509		t_from = binder_get_txn_from(t);
   4510		if (t_from) {
   4511			struct task_struct *sender = t_from->proc->tsk;
   4512
   4513			trd->sender_pid =
   4514				task_tgid_nr_ns(sender,
   4515						task_active_pid_ns(current));
   4516		} else {
   4517			trd->sender_pid = 0;
   4518		}
   4519
   4520		ret = binder_apply_fd_fixups(proc, t);
   4521		if (ret) {
   4522			struct binder_buffer *buffer = t->buffer;
   4523			bool oneway = !!(t->flags & TF_ONE_WAY);
   4524			int tid = t->debug_id;
   4525
   4526			if (t_from)
   4527				binder_thread_dec_tmpref(t_from);
   4528			buffer->transaction = NULL;
   4529			binder_cleanup_transaction(t, "fd fixups failed",
   4530						   BR_FAILED_REPLY);
   4531			binder_free_buf(proc, thread, buffer, true);
   4532			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
   4533				     "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
   4534				     proc->pid, thread->pid,
   4535				     oneway ? "async " :
   4536					(cmd == BR_REPLY ? "reply " : ""),
   4537				     tid, BR_FAILED_REPLY, ret, __LINE__);
   4538			if (cmd == BR_REPLY) {
   4539				cmd = BR_FAILED_REPLY;
   4540				if (put_user(cmd, (uint32_t __user *)ptr))
   4541					return -EFAULT;
   4542				ptr += sizeof(uint32_t);
   4543				binder_stat_br(proc, thread, cmd);
   4544				break;
   4545			}
   4546			continue;
   4547		}
   4548		trd->data_size = t->buffer->data_size;
   4549		trd->offsets_size = t->buffer->offsets_size;
   4550		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
   4551		trd->data.ptr.offsets = trd->data.ptr.buffer +
   4552					ALIGN(t->buffer->data_size,
   4553					    sizeof(void *));
   4554
   4555		tr.secctx = t->security_ctx;
   4556		if (t->security_ctx) {
   4557			cmd = BR_TRANSACTION_SEC_CTX;
   4558			trsize = sizeof(tr);
   4559		}
   4560		if (put_user(cmd, (uint32_t __user *)ptr)) {
   4561			if (t_from)
   4562				binder_thread_dec_tmpref(t_from);
   4563
   4564			binder_cleanup_transaction(t, "put_user failed",
   4565						   BR_FAILED_REPLY);
   4566
   4567			return -EFAULT;
   4568		}
   4569		ptr += sizeof(uint32_t);
   4570		if (copy_to_user(ptr, &tr, trsize)) {
   4571			if (t_from)
   4572				binder_thread_dec_tmpref(t_from);
   4573
   4574			binder_cleanup_transaction(t, "copy_to_user failed",
   4575						   BR_FAILED_REPLY);
   4576
   4577			return -EFAULT;
   4578		}
   4579		ptr += trsize;
   4580
   4581		trace_binder_transaction_received(t);
   4582		binder_stat_br(proc, thread, cmd);
   4583		binder_debug(BINDER_DEBUG_TRANSACTION,
   4584			     "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
   4585			     proc->pid, thread->pid,
   4586			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
   4587				(cmd == BR_TRANSACTION_SEC_CTX) ?
   4588				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
   4589			     t->debug_id, t_from ? t_from->proc->pid : 0,
   4590			     t_from ? t_from->pid : 0, cmd,
   4591			     t->buffer->data_size, t->buffer->offsets_size,
   4592			     (u64)trd->data.ptr.buffer,
   4593			     (u64)trd->data.ptr.offsets);
   4594
   4595		if (t_from)
   4596			binder_thread_dec_tmpref(t_from);
   4597		t->buffer->allow_user_free = 1;
   4598		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
   4599			binder_inner_proc_lock(thread->proc);
   4600			t->to_parent = thread->transaction_stack;
   4601			t->to_thread = thread;
   4602			thread->transaction_stack = t;
   4603			binder_inner_proc_unlock(thread->proc);
   4604		} else {
   4605			binder_free_transaction(t);
   4606		}
   4607		break;
   4608	}
   4609
   4610done:
   4611
   4612	*consumed = ptr - buffer;
   4613	binder_inner_proc_lock(proc);
   4614	if (proc->requested_threads == 0 &&
   4615	    list_empty(&thread->proc->waiting_threads) &&
   4616	    proc->requested_threads_started < proc->max_threads &&
   4617	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
    4618	     BINDER_LOOPER_STATE_ENTERED))
    4619	     /* user space fails to spawn a new thread if we leave this out */) {
   4620		proc->requested_threads++;
   4621		binder_inner_proc_unlock(proc);
   4622		binder_debug(BINDER_DEBUG_THREADS,
   4623			     "%d:%d BR_SPAWN_LOOPER\n",
   4624			     proc->pid, thread->pid);
   4625		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
   4626			return -EFAULT;
   4627		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
   4628	} else
   4629		binder_inner_proc_unlock(proc);
   4630	return 0;
   4631}
   4632
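        /*
         * Drain a work list that can no longer be delivered (thread or proc
         * teardown). Transactions are failed back to their sender with
         * BR_DEAD_REPLY; other undelivered items are logged and freed.
         */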
   4633static void binder_release_work(struct binder_proc *proc,
   4634				struct list_head *list)
   4635{
   4636	struct binder_work *w;
   4637	enum binder_work_type wtype;
   4638
   4639	while (1) {
   4640		binder_inner_proc_lock(proc);
   4641		w = binder_dequeue_work_head_ilocked(list);
   4642		wtype = w ? w->type : 0;
   4643		binder_inner_proc_unlock(proc);
   4644		if (!w)
   4645			return;
   4646
   4647		switch (wtype) {
   4648		case BINDER_WORK_TRANSACTION: {
   4649			struct binder_transaction *t;
   4650
   4651			t = container_of(w, struct binder_transaction, work);
   4652
   4653			binder_cleanup_transaction(t, "process died.",
   4654						   BR_DEAD_REPLY);
   4655		} break;
   4656		case BINDER_WORK_RETURN_ERROR: {
   4657			struct binder_error *e = container_of(
   4658					w, struct binder_error, work);
   4659
   4660			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
   4661				"undelivered TRANSACTION_ERROR: %u\n",
   4662				e->cmd);
   4663		} break;
   4664		case BINDER_WORK_TRANSACTION_COMPLETE: {
   4665			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
   4666				"undelivered TRANSACTION_COMPLETE\n");
   4667			kfree(w);
   4668			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
   4669		} break;
   4670		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
   4671		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
   4672			struct binder_ref_death *death;
   4673
   4674			death = container_of(w, struct binder_ref_death, work);
   4675			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
   4676				"undelivered death notification, %016llx\n",
   4677				(u64)death->cookie);
   4678			kfree(death);
   4679			binder_stats_deleted(BINDER_STAT_DEATH);
   4680		} break;
   4681		case BINDER_WORK_NODE:
   4682			break;
   4683		default:
   4684			pr_err("unexpected work type, %d, not freed\n",
   4685			       wtype);
   4686			break;
   4687		}
   4688	}
   4689
   4690}
   4691
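        /*
         * Find the binder_thread for current in proc->threads (an rbtree
         * keyed by PID), or insert @new_thread if the lookup misses and one
         * was supplied. Callers hold proc->inner_lock, hence the _ilocked
         * suffix.
         */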
   4692static struct binder_thread *binder_get_thread_ilocked(
   4693		struct binder_proc *proc, struct binder_thread *new_thread)
   4694{
   4695	struct binder_thread *thread = NULL;
   4696	struct rb_node *parent = NULL;
   4697	struct rb_node **p = &proc->threads.rb_node;
   4698
   4699	while (*p) {
   4700		parent = *p;
   4701		thread = rb_entry(parent, struct binder_thread, rb_node);
   4702
   4703		if (current->pid < thread->pid)
   4704			p = &(*p)->rb_left;
   4705		else if (current->pid > thread->pid)
   4706			p = &(*p)->rb_right;
   4707		else
   4708			return thread;
   4709	}
   4710	if (!new_thread)
   4711		return NULL;
   4712	thread = new_thread;
   4713	binder_stats_created(BINDER_STAT_THREAD);
   4714	thread->proc = proc;
   4715	thread->pid = current->pid;
   4716	atomic_set(&thread->tmp_ref, 0);
   4717	init_waitqueue_head(&thread->wait);
   4718	INIT_LIST_HEAD(&thread->todo);
   4719	rb_link_node(&thread->rb_node, parent, p);
   4720	rb_insert_color(&thread->rb_node, &proc->threads);
   4721	thread->looper_need_return = true;
   4722	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
   4723	thread->return_error.cmd = BR_OK;
   4724	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
   4725	thread->reply_error.cmd = BR_OK;
   4726	thread->ee.command = BR_OK;
    4727	INIT_LIST_HEAD(&thread->waiting_thread_node);
   4728	return thread;
   4729}
   4730
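        /*
         * Lookup first; on a miss, allocate outside the spinlock (GFP_KERNEL
         * may sleep) and repeat the lookup under the lock. If another thread
         * won the race and inserted itself first, free our duplicate.
         */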
   4731static struct binder_thread *binder_get_thread(struct binder_proc *proc)
   4732{
   4733	struct binder_thread *thread;
   4734	struct binder_thread *new_thread;
   4735
   4736	binder_inner_proc_lock(proc);
   4737	thread = binder_get_thread_ilocked(proc, NULL);
   4738	binder_inner_proc_unlock(proc);
   4739	if (!thread) {
   4740		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
   4741		if (new_thread == NULL)
   4742			return NULL;
   4743		binder_inner_proc_lock(proc);
   4744		thread = binder_get_thread_ilocked(proc, new_thread);
   4745		binder_inner_proc_unlock(proc);
   4746		if (thread != new_thread)
   4747			kfree(new_thread);
   4748	}
   4749	return thread;
   4750}
   4751
   4752static void binder_free_proc(struct binder_proc *proc)
   4753{
   4754	struct binder_device *device;
   4755
   4756	BUG_ON(!list_empty(&proc->todo));
   4757	BUG_ON(!list_empty(&proc->delivered_death));
   4758	if (proc->outstanding_txns)
   4759		pr_warn("%s: Unexpected outstanding_txns %d\n",
   4760			__func__, proc->outstanding_txns);
   4761	device = container_of(proc->context, struct binder_device, context);
   4762	if (refcount_dec_and_test(&device->ref)) {
   4763		kfree(proc->context->name);
   4764		kfree(device);
   4765	}
   4766	binder_alloc_deferred_release(&proc->alloc);
   4767	put_task_struct(proc->tsk);
   4768	put_cred(proc->cred);
   4769	binder_stats_deleted(BINDER_STAT_PROC);
   4770	kfree(proc);
   4771}
   4772
   4773static void binder_free_thread(struct binder_thread *thread)
   4774{
   4775	BUG_ON(!list_empty(&thread->todo));
   4776	binder_stats_deleted(BINDER_STAT_THREAD);
   4777	binder_proc_dec_tmpref(thread->proc);
   4778	kfree(thread);
   4779}
   4780
   4781static int binder_thread_release(struct binder_proc *proc,
   4782				 struct binder_thread *thread)
   4783{
   4784	struct binder_transaction *t;
   4785	struct binder_transaction *send_reply = NULL;
   4786	int active_transactions = 0;
   4787	struct binder_transaction *last_t = NULL;
   4788
   4789	binder_inner_proc_lock(thread->proc);
   4790	/*
   4791	 * take a ref on the proc so it survives
   4792	 * after we remove this thread from proc->threads.
    4793	 * The corresponding decrement happens when the
    4794	 * thread is actually freed in binder_free_thread().
   4795	 */
   4796	proc->tmp_ref++;
   4797	/*
   4798	 * take a ref on this thread to ensure it
   4799	 * survives while we are releasing it
   4800	 */
   4801	atomic_inc(&thread->tmp_ref);
   4802	rb_erase(&thread->rb_node, &proc->threads);
   4803	t = thread->transaction_stack;
   4804	if (t) {
   4805		spin_lock(&t->lock);
   4806		if (t->to_thread == thread)
   4807			send_reply = t;
   4808	} else {
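        		/*
        		 * Sparse annotation only: pretend t->lock was taken so
        		 * the unlock accounting below stays balanced when there
        		 * is no transaction stack.
        		 */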
   4809		__acquire(&t->lock);
   4810	}
   4811	thread->is_dead = true;
   4812
   4813	while (t) {
   4814		last_t = t;
   4815		active_transactions++;
   4816		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
   4817			     "release %d:%d transaction %d %s, still active\n",
   4818			      proc->pid, thread->pid,
   4819			     t->debug_id,
   4820			     (t->to_thread == thread) ? "in" : "out");
   4821
   4822		if (t->to_thread == thread) {
   4823			thread->proc->outstanding_txns--;
   4824			t->to_proc = NULL;
   4825			t->to_thread = NULL;
   4826			if (t->buffer) {
   4827				t->buffer->transaction = NULL;
   4828				t->buffer = NULL;
   4829			}
   4830			t = t->to_parent;
   4831		} else if (t->from == thread) {
   4832			t->from = NULL;
   4833			t = t->from_parent;
   4834		} else
   4835			BUG();
   4836		spin_unlock(&last_t->lock);
   4837		if (t)
   4838			spin_lock(&t->lock);
   4839		else
   4840			__acquire(&t->lock);
   4841	}
   4842	/* annotation for sparse, lock not acquired in last iteration above */
   4843	__release(&t->lock);
   4844
   4845	/*
   4846	 * If this thread used poll, make sure we remove the waitqueue from any
   4847	 * poll data structures holding it.
   4848	 */
   4849	if (thread->looper & BINDER_LOOPER_STATE_POLL)
   4850		wake_up_pollfree(&thread->wait);
   4851
   4852	binder_inner_proc_unlock(thread->proc);
   4853
   4854	/*
   4855	 * This is needed to avoid races between wake_up_pollfree() above and
   4856	 * someone else removing the last entry from the queue for other reasons
   4857	 * (e.g. ep_remove_wait_queue() being called due to an epoll file
   4858	 * descriptor being closed).  Such other users hold an RCU read lock, so
   4859	 * we can be sure they're done after we call synchronize_rcu().
   4860	 */
   4861	if (thread->looper & BINDER_LOOPER_STATE_POLL)
   4862		synchronize_rcu();
   4863
   4864	if (send_reply)
   4865		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
   4866	binder_release_work(proc, &thread->todo);
   4867	binder_thread_dec_tmpref(thread);
   4868	return active_transactions;
   4869}
   4870
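        /*
         * poll() support: only EPOLLIN is ever signalled, once this thread
         * (or the proc, when the thread is available for proc work) has work
         * queued. Illustrative userspace use (sketch only):
         *
         *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
         *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
         *		... issue BINDER_WRITE_READ to consume the work ...
         */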
   4871static __poll_t binder_poll(struct file *filp,
   4872				struct poll_table_struct *wait)
   4873{
   4874	struct binder_proc *proc = filp->private_data;
   4875	struct binder_thread *thread = NULL;
   4876	bool wait_for_proc_work;
   4877
   4878	thread = binder_get_thread(proc);
   4879	if (!thread)
   4880		return POLLERR;
   4881
   4882	binder_inner_proc_lock(thread->proc);
   4883	thread->looper |= BINDER_LOOPER_STATE_POLL;
   4884	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
   4885
   4886	binder_inner_proc_unlock(thread->proc);
   4887
   4888	poll_wait(filp, &thread->wait, wait);
   4889
   4890	if (binder_has_work(thread, wait_for_proc_work))
   4891		return EPOLLIN;
   4892
   4893	return 0;
   4894}
   4895
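        /*
         * Handle BINDER_WRITE_READ, the main data-plane ioctl. The write
         * buffer is consumed first; only then may the read side block
         * (unless O_NONBLOCK is set). Illustrative userspace call (sketch;
         * field names from <uapi/linux/android/binder.h>):
         *
         *	struct binder_write_read bwr = {
         *		.write_buffer = (binder_uintptr_t)cmds,
         *		.write_size = cmds_len,
         *		.read_buffer = (binder_uintptr_t)rbuf,
         *		.read_size = sizeof(rbuf),
         *	};
         *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
         *
         * On return, write_consumed/read_consumed report how far each side
         * got.
         */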
   4896static int binder_ioctl_write_read(struct file *filp,
   4897				unsigned int cmd, unsigned long arg,
   4898				struct binder_thread *thread)
   4899{
   4900	int ret = 0;
   4901	struct binder_proc *proc = filp->private_data;
   4902	unsigned int size = _IOC_SIZE(cmd);
   4903	void __user *ubuf = (void __user *)arg;
   4904	struct binder_write_read bwr;
   4905
   4906	if (size != sizeof(struct binder_write_read)) {
   4907		ret = -EINVAL;
   4908		goto out;
   4909	}
   4910	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
   4911		ret = -EFAULT;
   4912		goto out;
   4913	}
   4914	binder_debug(BINDER_DEBUG_READ_WRITE,
   4915		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
   4916		     proc->pid, thread->pid,
   4917		     (u64)bwr.write_size, (u64)bwr.write_buffer,
   4918		     (u64)bwr.read_size, (u64)bwr.read_buffer);
   4919
   4920	if (bwr.write_size > 0) {
   4921		ret = binder_thread_write(proc, thread,
   4922					  bwr.write_buffer,
   4923					  bwr.write_size,
   4924					  &bwr.write_consumed);
   4925		trace_binder_write_done(ret);
   4926		if (ret < 0) {
   4927			bwr.read_consumed = 0;
   4928			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
   4929				ret = -EFAULT;
   4930			goto out;
   4931		}
   4932	}
   4933	if (bwr.read_size > 0) {
   4934		ret = binder_thread_read(proc, thread, bwr.read_buffer,
   4935					 bwr.read_size,
   4936					 &bwr.read_consumed,
   4937					 filp->f_flags & O_NONBLOCK);
   4938		trace_binder_read_done(ret);
   4939		binder_inner_proc_lock(proc);
   4940		if (!binder_worklist_empty_ilocked(&proc->todo))
   4941			binder_wakeup_proc_ilocked(proc);
   4942		binder_inner_proc_unlock(proc);
   4943		if (ret < 0) {
   4944			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
   4945				ret = -EFAULT;
   4946			goto out;
   4947		}
   4948	}
   4949	binder_debug(BINDER_DEBUG_READ_WRITE,
   4950		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
   4951		     proc->pid, thread->pid,
   4952		     (u64)bwr.write_consumed, (u64)bwr.write_size,
   4953		     (u64)bwr.read_consumed, (u64)bwr.read_size);
   4954	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
   4955		ret = -EFAULT;
   4956		goto out;
   4957	}
   4958out:
   4959	return ret;
   4960}
   4961
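        /*
         * BINDER_SET_CONTEXT_MGR(_EXT): register the caller as the single
         * context manager (handle 0) of this binder context. The role can
         * only be claimed once, a previously configured manager uid must
         * match the caller's euid, and the new node is pinned with strong
         * and weak refs for the lifetime of the context.
         */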
   4962static int binder_ioctl_set_ctx_mgr(struct file *filp,
   4963				    struct flat_binder_object *fbo)
   4964{
   4965	int ret = 0;
   4966	struct binder_proc *proc = filp->private_data;
   4967	struct binder_context *context = proc->context;
   4968	struct binder_node *new_node;
   4969	kuid_t curr_euid = current_euid();
   4970
   4971	mutex_lock(&context->context_mgr_node_lock);
   4972	if (context->binder_context_mgr_node) {
   4973		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
   4974		ret = -EBUSY;
   4975		goto out;
   4976	}
   4977	ret = security_binder_set_context_mgr(proc->cred);
   4978	if (ret < 0)
   4979		goto out;
   4980	if (uid_valid(context->binder_context_mgr_uid)) {
   4981		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
   4982			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
   4983			       from_kuid(&init_user_ns, curr_euid),
   4984			       from_kuid(&init_user_ns,
   4985					 context->binder_context_mgr_uid));
   4986			ret = -EPERM;
   4987			goto out;
   4988		}
   4989	} else {
   4990		context->binder_context_mgr_uid = curr_euid;
   4991	}
   4992	new_node = binder_new_node(proc, fbo);
   4993	if (!new_node) {
   4994		ret = -ENOMEM;
   4995		goto out;
   4996	}
   4997	binder_node_lock(new_node);
   4998	new_node->local_weak_refs++;
   4999	new_node->local_strong_refs++;
   5000	new_node->has_strong_ref = 1;
   5001	new_node->has_weak_ref = 1;
   5002	context->binder_context_mgr_node = new_node;
   5003	binder_node_unlock(new_node);
   5004	binder_put_node(new_node);
   5005out:
   5006	mutex_unlock(&context->context_mgr_node_lock);
   5007	return ret;
   5008}
   5009
   5010static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
   5011		struct binder_node_info_for_ref *info)
   5012{
   5013	struct binder_node *node;
   5014	struct binder_context *context = proc->context;
   5015	__u32 handle = info->handle;
   5016
   5017	if (info->strong_count || info->weak_count || info->reserved1 ||
   5018	    info->reserved2 || info->reserved3) {
   5019		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
   5020				  proc->pid);
   5021		return -EINVAL;
   5022	}
   5023
   5024	/* This ioctl may only be used by the context manager */
   5025	mutex_lock(&context->context_mgr_node_lock);
   5026	if (!context->binder_context_mgr_node ||
   5027		context->binder_context_mgr_node->proc != proc) {
   5028		mutex_unlock(&context->context_mgr_node_lock);
   5029		return -EPERM;
   5030	}
   5031	mutex_unlock(&context->context_mgr_node_lock);
   5032
   5033	node = binder_get_node_from_ref(proc, handle, true, NULL);
   5034	if (!node)
   5035		return -EINVAL;
   5036
   5037	info->strong_count = node->local_strong_refs +
   5038		node->internal_strong_refs;
   5039	info->weak_count = node->local_weak_refs;
   5040
   5041	binder_put_node(node);
   5042
   5043	return 0;
   5044}
   5045
   5046static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
   5047				struct binder_node_debug_info *info)
   5048{
   5049	struct rb_node *n;
   5050	binder_uintptr_t ptr = info->ptr;
   5051
   5052	memset(info, 0, sizeof(*info));
   5053
   5054	binder_inner_proc_lock(proc);
   5055	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
   5056		struct binder_node *node = rb_entry(n, struct binder_node,
   5057						    rb_node);
   5058		if (node->ptr > ptr) {
   5059			info->ptr = node->ptr;
   5060			info->cookie = node->cookie;
   5061			info->has_strong_ref = node->has_strong_ref;
   5062			info->has_weak_ref = node->has_weak_ref;
   5063			break;
   5064		}
   5065	}
   5066	binder_inner_proc_unlock(proc);
   5067
   5068	return 0;
   5069}
   5070
   5071static bool binder_txns_pending_ilocked(struct binder_proc *proc)
   5072{
   5073	struct rb_node *n;
   5074	struct binder_thread *thread;
   5075
   5076	if (proc->outstanding_txns > 0)
   5077		return true;
   5078
   5079	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
   5080		thread = rb_entry(n, struct binder_thread, rb_node);
   5081		if (thread->transaction_stack)
   5082			return true;
   5083	}
   5084	return false;
   5085}
   5086
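        /*
         * Backend for BINDER_FREEZE on a single proc. enable == 0 just
         * clears the frozen state. enable == 1 marks the proc frozen so
         * that new transactions are rejected, optionally waits up to
         * timeout_ms for outstanding transactions to drain, and unfreezes
         * again on failure: -EAGAIN if transactions are still pending, or
         * the wait error.
         */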
   5087static int binder_ioctl_freeze(struct binder_freeze_info *info,
   5088			       struct binder_proc *target_proc)
   5089{
   5090	int ret = 0;
   5091
   5092	if (!info->enable) {
   5093		binder_inner_proc_lock(target_proc);
   5094		target_proc->sync_recv = false;
   5095		target_proc->async_recv = false;
   5096		target_proc->is_frozen = false;
   5097		binder_inner_proc_unlock(target_proc);
   5098		return 0;
   5099	}
   5100
   5101	/*
   5102	 * Freezing the target. Prevent new transactions by
    5103	 * setting the frozen state. If a timeout is specified, wait
   5104	 * for transactions to drain.
   5105	 */
   5106	binder_inner_proc_lock(target_proc);
   5107	target_proc->sync_recv = false;
   5108	target_proc->async_recv = false;
   5109	target_proc->is_frozen = true;
   5110	binder_inner_proc_unlock(target_proc);
   5111
   5112	if (info->timeout_ms > 0)
   5113		ret = wait_event_interruptible_timeout(
   5114			target_proc->freeze_wait,
   5115			(!target_proc->outstanding_txns),
   5116			msecs_to_jiffies(info->timeout_ms));
   5117
   5118	/* Check pending transactions that wait for reply */
   5119	if (ret >= 0) {
   5120		binder_inner_proc_lock(target_proc);
   5121		if (binder_txns_pending_ilocked(target_proc))
   5122			ret = -EAGAIN;
   5123		binder_inner_proc_unlock(target_proc);
   5124	}
   5125
   5126	if (ret < 0) {
   5127		binder_inner_proc_lock(target_proc);
   5128		target_proc->is_frozen = false;
   5129		binder_inner_proc_unlock(target_proc);
   5130	}
   5131
   5132	return ret;
   5133}
   5134
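        /*
         * Report whether a frozen proc was sent transactions, OR-ed over
         * all procs with the given pid: in the returned info, bit 0 of
         * sync_recv means a sync transaction arrived while frozen, bit 1
         * means transactions are still pending, and async_recv flags async
         * work.
         */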
   5135static int binder_ioctl_get_freezer_info(
   5136				struct binder_frozen_status_info *info)
   5137{
   5138	struct binder_proc *target_proc;
   5139	bool found = false;
   5140	__u32 txns_pending;
   5141
   5142	info->sync_recv = 0;
   5143	info->async_recv = 0;
   5144
   5145	mutex_lock(&binder_procs_lock);
   5146	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
   5147		if (target_proc->pid == info->pid) {
   5148			found = true;
   5149			binder_inner_proc_lock(target_proc);
   5150			txns_pending = binder_txns_pending_ilocked(target_proc);
   5151			info->sync_recv |= target_proc->sync_recv |
   5152					(txns_pending << 1);
   5153			info->async_recv |= target_proc->async_recv;
   5154			binder_inner_proc_unlock(target_proc);
   5155		}
   5156	}
   5157	mutex_unlock(&binder_procs_lock);
   5158
   5159	if (!found)
   5160		return -EINVAL;
   5161
   5162	return 0;
   5163}
   5164
   5165static int binder_ioctl_get_extended_error(struct binder_thread *thread,
   5166					   void __user *ubuf)
   5167{
   5168	struct binder_extended_error ee;
   5169
   5170	binder_inner_proc_lock(thread->proc);
   5171	ee = thread->ee;
   5172	binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
   5173	binder_inner_proc_unlock(thread->proc);
   5174
   5175	if (copy_to_user(ubuf, &ee, sizeof(ee)))
   5176		return -EFAULT;
   5177
   5178	return 0;
   5179}
   5180
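        /*
         * Top-level ioctl dispatcher. A typical userspace bring-up against
         * this interface looks roughly like (illustrative sketch only):
         *
         *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
         *	struct binder_version vers;
         *	ioctl(fd, BINDER_VERSION, &vers);
         *	if (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
         *		... bail out ...
         *	mmap(NULL, map_size, PROT_READ, MAP_PRIVATE, fd, 0);
         *	for (;;)
         *		ioctl(fd, BINDER_WRITE_READ, &bwr);
         */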
   5181static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
   5182{
   5183	int ret;
   5184	struct binder_proc *proc = filp->private_data;
   5185	struct binder_thread *thread;
   5186	unsigned int size = _IOC_SIZE(cmd);
   5187	void __user *ubuf = (void __user *)arg;
   5188
   5189	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
   5190			proc->pid, current->pid, cmd, arg);*/
   5191
   5192	binder_selftest_alloc(&proc->alloc);
   5193
   5194	trace_binder_ioctl(cmd, arg);
   5195
   5196	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
   5197	if (ret)
   5198		goto err_unlocked;
   5199
   5200	thread = binder_get_thread(proc);
   5201	if (thread == NULL) {
   5202		ret = -ENOMEM;
   5203		goto err;
   5204	}
   5205
   5206	switch (cmd) {
   5207	case BINDER_WRITE_READ:
   5208		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
   5209		if (ret)
   5210			goto err;
   5211		break;
   5212	case BINDER_SET_MAX_THREADS: {
   5213		int max_threads;
   5214
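        		/*
        		 * Note: unlike most other handlers here, a failed copy
        		 * historically returns -EINVAL rather than -EFAULT.
        		 */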
   5215		if (copy_from_user(&max_threads, ubuf,
   5216				   sizeof(max_threads))) {
   5217			ret = -EINVAL;
   5218			goto err;
   5219		}
   5220		binder_inner_proc_lock(proc);
   5221		proc->max_threads = max_threads;
   5222		binder_inner_proc_unlock(proc);
   5223		break;
   5224	}
   5225	case BINDER_SET_CONTEXT_MGR_EXT: {
   5226		struct flat_binder_object fbo;
   5227
   5228		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
   5229			ret = -EINVAL;
   5230			goto err;
   5231		}
   5232		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
   5233		if (ret)
   5234			goto err;
   5235		break;
   5236	}
   5237	case BINDER_SET_CONTEXT_MGR:
   5238		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
   5239		if (ret)
   5240			goto err;
   5241		break;
   5242	case BINDER_THREAD_EXIT:
   5243		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
   5244			     proc->pid, thread->pid);
   5245		binder_thread_release(proc, thread);
   5246		thread = NULL;
   5247		break;
   5248	case BINDER_VERSION: {
   5249		struct binder_version __user *ver = ubuf;
   5250
   5251		if (size != sizeof(struct binder_version)) {
   5252			ret = -EINVAL;
   5253			goto err;
   5254		}
   5255		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
   5256			     &ver->protocol_version)) {
   5257			ret = -EINVAL;
   5258			goto err;
   5259		}
   5260		break;
   5261	}
   5262	case BINDER_GET_NODE_INFO_FOR_REF: {
   5263		struct binder_node_info_for_ref info;
   5264
   5265		if (copy_from_user(&info, ubuf, sizeof(info))) {
   5266			ret = -EFAULT;
   5267			goto err;
   5268		}
   5269
   5270		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
   5271		if (ret < 0)
   5272			goto err;
   5273
   5274		if (copy_to_user(ubuf, &info, sizeof(info))) {
   5275			ret = -EFAULT;
   5276			goto err;
   5277		}
   5278
   5279		break;
   5280	}
   5281	case BINDER_GET_NODE_DEBUG_INFO: {
   5282		struct binder_node_debug_info info;
   5283
   5284		if (copy_from_user(&info, ubuf, sizeof(info))) {
   5285			ret = -EFAULT;
   5286			goto err;
   5287		}
   5288
   5289		ret = binder_ioctl_get_node_debug_info(proc, &info);
   5290		if (ret < 0)
   5291			goto err;
   5292
   5293		if (copy_to_user(ubuf, &info, sizeof(info))) {
   5294			ret = -EFAULT;
   5295			goto err;
   5296		}
   5297		break;
   5298	}
   5299	case BINDER_FREEZE: {
   5300		struct binder_freeze_info info;
   5301		struct binder_proc **target_procs = NULL, *target_proc;
   5302		int target_procs_count = 0, i = 0;
   5303
   5304		ret = 0;
   5305
   5306		if (copy_from_user(&info, ubuf, sizeof(info))) {
   5307			ret = -EFAULT;
   5308			goto err;
   5309		}
   5310
   5311		mutex_lock(&binder_procs_lock);
   5312		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
   5313			if (target_proc->pid == info.pid)
   5314				target_procs_count++;
   5315		}
   5316
   5317		if (target_procs_count == 0) {
   5318			mutex_unlock(&binder_procs_lock);
   5319			ret = -EINVAL;
   5320			goto err;
   5321		}
   5322
   5323		target_procs = kcalloc(target_procs_count,
   5324				       sizeof(struct binder_proc *),
   5325				       GFP_KERNEL);
   5326
   5327		if (!target_procs) {
   5328			mutex_unlock(&binder_procs_lock);
   5329			ret = -ENOMEM;
   5330			goto err;
   5331		}
   5332
   5333		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
   5334			if (target_proc->pid != info.pid)
   5335				continue;
   5336
   5337			binder_inner_proc_lock(target_proc);
   5338			target_proc->tmp_ref++;
   5339			binder_inner_proc_unlock(target_proc);
   5340
   5341			target_procs[i++] = target_proc;
   5342		}
   5343		mutex_unlock(&binder_procs_lock);
   5344
   5345		for (i = 0; i < target_procs_count; i++) {
   5346			if (ret >= 0)
   5347				ret = binder_ioctl_freeze(&info,
   5348							  target_procs[i]);
   5349
   5350			binder_proc_dec_tmpref(target_procs[i]);
   5351		}
   5352
   5353		kfree(target_procs);
   5354
   5355		if (ret < 0)
   5356			goto err;
   5357		break;
   5358	}
   5359	case BINDER_GET_FROZEN_INFO: {
   5360		struct binder_frozen_status_info info;
   5361
   5362		if (copy_from_user(&info, ubuf, sizeof(info))) {
   5363			ret = -EFAULT;
   5364			goto err;
   5365		}
   5366
   5367		ret = binder_ioctl_get_freezer_info(&info);
   5368		if (ret < 0)
   5369			goto err;
   5370
   5371		if (copy_to_user(ubuf, &info, sizeof(info))) {
   5372			ret = -EFAULT;
   5373			goto err;
   5374		}
   5375		break;
   5376	}
   5377	case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
   5378		uint32_t enable;
   5379
   5380		if (copy_from_user(&enable, ubuf, sizeof(enable))) {
   5381			ret = -EFAULT;
   5382			goto err;
   5383		}
   5384		binder_inner_proc_lock(proc);
   5385		proc->oneway_spam_detection_enabled = (bool)enable;
   5386		binder_inner_proc_unlock(proc);
   5387		break;
   5388	}
   5389	case BINDER_GET_EXTENDED_ERROR:
   5390		ret = binder_ioctl_get_extended_error(thread, ubuf);
   5391		if (ret < 0)
   5392			goto err;
   5393		break;
   5394	default:
   5395		ret = -EINVAL;
   5396		goto err;
   5397	}
   5398	ret = 0;
   5399err:
   5400	if (thread)
   5401		thread->looper_need_return = false;
   5402	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
   5403	if (ret && ret != -EINTR)
   5404		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
   5405err_unlocked:
   5406	trace_binder_ioctl_done(ret);
   5407	return ret;
   5408}
   5409
   5410static void binder_vma_open(struct vm_area_struct *vma)
   5411{
   5412	struct binder_proc *proc = vma->vm_private_data;
   5413
   5414	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
   5415		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
   5416		     proc->pid, vma->vm_start, vma->vm_end,
   5417		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
   5418		     (unsigned long)pgprot_val(vma->vm_page_prot));
   5419}
   5420
   5421static void binder_vma_close(struct vm_area_struct *vma)
   5422{
   5423	struct binder_proc *proc = vma->vm_private_data;
   5424
   5425	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
   5426		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
   5427		     proc->pid, vma->vm_start, vma->vm_end,
   5428		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
   5429		     (unsigned long)pgprot_val(vma->vm_page_prot));
   5430	binder_alloc_vma_close(&proc->alloc);
   5431}
   5432
   5433static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
   5434{
   5435	return VM_FAULT_SIGBUS;
   5436}
   5437
   5438static const struct vm_operations_struct binder_vm_ops = {
   5439	.open = binder_vma_open,
   5440	.close = binder_vma_close,
   5441	.fault = binder_vm_fault,
   5442};
   5443
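        /*
         * The binder buffer area is mapped read-only: VM_WRITE mappings are
         * refused outright and VM_MAYWRITE is cleared so the mapping cannot
         * later be made writable via mprotect(). Only threads of the
         * process that opened the fd may mmap; the actual page management
         * lives in binder_alloc_mmap_handler().
         */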
   5444static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
   5445{
   5446	struct binder_proc *proc = filp->private_data;
   5447
   5448	if (proc->tsk != current->group_leader)
   5449		return -EINVAL;
   5450
   5451	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
   5452		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
   5453		     __func__, proc->pid, vma->vm_start, vma->vm_end,
   5454		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
   5455		     (unsigned long)pgprot_val(vma->vm_page_prot));
   5456
   5457	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
   5458		pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
   5459		       proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
   5460		return -EPERM;
   5461	}
   5462	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
   5463	vma->vm_flags &= ~VM_MAYWRITE;
   5464
   5465	vma->vm_ops = &binder_vm_ops;
   5466	vma->vm_private_data = proc;
   5467
   5468	return binder_alloc_mmap_handler(&proc->alloc, vma);
   5469}
   5470
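        /*
         * One binder_proc is created per open(), keyed to the opener's
         * thread group leader so that all threads of a process share it.
         * binderfs devices stash their binder_device in i_private; plain
         * misc devices are recovered with container_of() on
         * filp->private_data.
         */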
   5471static int binder_open(struct inode *nodp, struct file *filp)
   5472{
   5473	struct binder_proc *proc, *itr;
   5474	struct binder_device *binder_dev;
   5475	struct binderfs_info *info;
   5476	struct dentry *binder_binderfs_dir_entry_proc = NULL;
   5477	bool existing_pid = false;
   5478
   5479	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
   5480		     current->group_leader->pid, current->pid);
   5481
   5482	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
   5483	if (proc == NULL)
   5484		return -ENOMEM;
   5485	spin_lock_init(&proc->inner_lock);
   5486	spin_lock_init(&proc->outer_lock);
   5487	get_task_struct(current->group_leader);
   5488	proc->tsk = current->group_leader;
   5489	proc->cred = get_cred(filp->f_cred);
   5490	INIT_LIST_HEAD(&proc->todo);
   5491	init_waitqueue_head(&proc->freeze_wait);
   5492	proc->default_priority = task_nice(current);
   5493	/* binderfs stashes devices in i_private */
   5494	if (is_binderfs_device(nodp)) {
   5495		binder_dev = nodp->i_private;
   5496		info = nodp->i_sb->s_fs_info;
   5497		binder_binderfs_dir_entry_proc = info->proc_log_dir;
   5498	} else {
   5499		binder_dev = container_of(filp->private_data,
   5500					  struct binder_device, miscdev);
   5501	}
   5502	refcount_inc(&binder_dev->ref);
   5503	proc->context = &binder_dev->context;
   5504	binder_alloc_init(&proc->alloc);
   5505
   5506	binder_stats_created(BINDER_STAT_PROC);
   5507	proc->pid = current->group_leader->pid;
   5508	INIT_LIST_HEAD(&proc->delivered_death);
   5509	INIT_LIST_HEAD(&proc->waiting_threads);
   5510	filp->private_data = proc;
   5511
   5512	mutex_lock(&binder_procs_lock);
   5513	hlist_for_each_entry(itr, &binder_procs, proc_node) {
   5514		if (itr->pid == proc->pid) {
   5515			existing_pid = true;
   5516			break;
   5517		}
   5518	}
   5519	hlist_add_head(&proc->proc_node, &binder_procs);
   5520	mutex_unlock(&binder_procs_lock);
   5521
   5522	if (binder_debugfs_dir_entry_proc && !existing_pid) {
   5523		char strbuf[11];
   5524
   5525		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
   5526		/*
   5527		 * proc debug entries are shared between contexts.
    5528		 * Only create for the first PID to avoid debugfs log spamming.
   5529		 * The printing code will anyway print all contexts for a given
   5530		 * PID so this is not a problem.
   5531		 */
   5532		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
   5533			binder_debugfs_dir_entry_proc,
   5534			(void *)(unsigned long)proc->pid,
   5535			&proc_fops);
   5536	}
   5537
   5538	if (binder_binderfs_dir_entry_proc && !existing_pid) {
   5539		char strbuf[11];
   5540		struct dentry *binderfs_entry;
   5541
   5542		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
   5543		/*
   5544		 * Similar to debugfs, the process specific log file is shared
   5545		 * between contexts. Only create for the first PID.
    5546		 * This is ok since, as with debugfs, the log file will contain
   5547		 * information on all contexts of a given PID.
   5548		 */
   5549		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
   5550			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
   5551		if (!IS_ERR(binderfs_entry)) {
   5552			proc->binderfs_entry = binderfs_entry;
   5553		} else {
   5554			int error;
   5555
   5556			error = PTR_ERR(binderfs_entry);
   5557			pr_warn("Unable to create file %s in binderfs (error %d)\n",
   5558				strbuf, error);
   5559		}
   5560	}
   5561
   5562	return 0;
   5563}
   5564
   5565static int binder_flush(struct file *filp, fl_owner_t id)
   5566{
   5567	struct binder_proc *proc = filp->private_data;
   5568
   5569	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
   5570
   5571	return 0;
   5572}
   5573
   5574static void binder_deferred_flush(struct binder_proc *proc)
   5575{
   5576	struct rb_node *n;
   5577	int wake_count = 0;
   5578
   5579	binder_inner_proc_lock(proc);
   5580	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
   5581		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
   5582
   5583		thread->looper_need_return = true;
   5584		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
   5585			wake_up_interruptible(&thread->wait);
   5586			wake_count++;
   5587		}
   5588	}
   5589	binder_inner_proc_unlock(proc);
   5590
   5591	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
   5592		     "binder_flush: %d woke %d threads\n", proc->pid,
   5593		     wake_count);
   5594}
   5595
   5596static int binder_release(struct inode *nodp, struct file *filp)
   5597{
   5598	struct binder_proc *proc = filp->private_data;
   5599
   5600	debugfs_remove(proc->debugfs_entry);
   5601
   5602	if (proc->binderfs_entry) {
   5603		binderfs_remove_file(proc->binderfs_entry);
   5604		proc->binderfs_entry = NULL;
   5605	}
   5606
   5607	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
   5608
   5609	return 0;
   5610}
   5611
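        /*
         * Called for each node of a dying proc. A node nobody references is
         * freed immediately; otherwise it moves to binder_dead_nodes and a
         * BINDER_WORK_DEAD_BINDER item is queued to every ref that asked
         * for a death notification. Returns @refs plus the number of refs
         * visited.
         */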
   5612static int binder_node_release(struct binder_node *node, int refs)
   5613{
   5614	struct binder_ref *ref;
   5615	int death = 0;
   5616	struct binder_proc *proc = node->proc;
   5617
   5618	binder_release_work(proc, &node->async_todo);
   5619
   5620	binder_node_lock(node);
   5621	binder_inner_proc_lock(proc);
   5622	binder_dequeue_work_ilocked(&node->work);
   5623	/*
    5624	 * The caller must have taken a temporary ref on the node.
   5625	 */
   5626	BUG_ON(!node->tmp_refs);
   5627	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
   5628		binder_inner_proc_unlock(proc);
   5629		binder_node_unlock(node);
   5630		binder_free_node(node);
   5631
   5632		return refs;
   5633	}
   5634
   5635	node->proc = NULL;
   5636	node->local_strong_refs = 0;
   5637	node->local_weak_refs = 0;
   5638	binder_inner_proc_unlock(proc);
   5639
   5640	spin_lock(&binder_dead_nodes_lock);
   5641	hlist_add_head(&node->dead_node, &binder_dead_nodes);
   5642	spin_unlock(&binder_dead_nodes_lock);
   5643
   5644	hlist_for_each_entry(ref, &node->refs, node_entry) {
   5645		refs++;
   5646		/*
   5647		 * Need the node lock to synchronize
   5648		 * with new notification requests and the
   5649		 * inner lock to synchronize with queued
   5650		 * death notifications.
   5651		 */
   5652		binder_inner_proc_lock(ref->proc);
   5653		if (!ref->death) {
   5654			binder_inner_proc_unlock(ref->proc);
   5655			continue;
   5656		}
   5657
   5658		death++;
   5659
   5660		BUG_ON(!list_empty(&ref->death->work.entry));
   5661		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
   5662		binder_enqueue_work_ilocked(&ref->death->work,
   5663					    &ref->proc->todo);
   5664		binder_wakeup_proc_ilocked(ref->proc);
   5665		binder_inner_proc_unlock(ref->proc);
   5666	}
   5667
   5668	binder_debug(BINDER_DEBUG_DEAD_BINDER,
   5669		     "node %d now dead, refs %d, death %d\n",
   5670		     node->debug_id, refs, death);
   5671	binder_node_unlock(node);
   5672	binder_put_node(node);
   5673
   5674	return refs;
   5675}
   5676
   5677static void binder_deferred_release(struct binder_proc *proc)
   5678{
   5679	struct binder_context *context = proc->context;
   5680	struct rb_node *n;
   5681	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
   5682
   5683	mutex_lock(&binder_procs_lock);
   5684	hlist_del(&proc->proc_node);
   5685	mutex_unlock(&binder_procs_lock);
   5686
   5687	mutex_lock(&context->context_mgr_node_lock);
   5688	if (context->binder_context_mgr_node &&
   5689	    context->binder_context_mgr_node->proc == proc) {
   5690		binder_debug(BINDER_DEBUG_DEAD_BINDER,
   5691			     "%s: %d context_mgr_node gone\n",
   5692			     __func__, proc->pid);
   5693		context->binder_context_mgr_node = NULL;
   5694	}
   5695	mutex_unlock(&context->context_mgr_node_lock);
   5696	binder_inner_proc_lock(proc);
   5697	/*
   5698	 * Make sure proc stays alive after we
   5699	 * remove all the threads
   5700	 */
   5701	proc->tmp_ref++;
   5702
   5703	proc->is_dead = true;
   5704	proc->is_frozen = false;
   5705	proc->sync_recv = false;
   5706	proc->async_recv = false;
   5707	threads = 0;
   5708	active_transactions = 0;
   5709	while ((n = rb_first(&proc->threads))) {
   5710		struct binder_thread *thread;
   5711
   5712		thread = rb_entry(n, struct binder_thread, rb_node);
   5713		binder_inner_proc_unlock(proc);
   5714		threads++;
   5715		active_transactions += binder_thread_release(proc, thread);
   5716		binder_inner_proc_lock(proc);
   5717	}
   5718
   5719	nodes = 0;
   5720	incoming_refs = 0;
   5721	while ((n = rb_first(&proc->nodes))) {
   5722		struct binder_node *node;
   5723
   5724		node = rb_entry(n, struct binder_node, rb_node);
   5725		nodes++;
   5726		/*
   5727		 * take a temporary ref on the node before
   5728		 * calling binder_node_release() which will either
   5729		 * kfree() the node or call binder_put_node()
   5730		 */
   5731		binder_inc_node_tmpref_ilocked(node);
   5732		rb_erase(&node->rb_node, &proc->nodes);
   5733		binder_inner_proc_unlock(proc);
   5734		incoming_refs = binder_node_release(node, incoming_refs);
   5735		binder_inner_proc_lock(proc);
   5736	}
   5737	binder_inner_proc_unlock(proc);
   5738
   5739	outgoing_refs = 0;
   5740	binder_proc_lock(proc);
   5741	while ((n = rb_first(&proc->refs_by_desc))) {
   5742		struct binder_ref *ref;
   5743
   5744		ref = rb_entry(n, struct binder_ref, rb_node_desc);
   5745		outgoing_refs++;
   5746		binder_cleanup_ref_olocked(ref);
   5747		binder_proc_unlock(proc);
   5748		binder_free_ref(ref);
   5749		binder_proc_lock(proc);
   5750	}
   5751	binder_proc_unlock(proc);
   5752
   5753	binder_release_work(proc, &proc->todo);
   5754	binder_release_work(proc, &proc->delivered_death);
   5755
   5756	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
   5757		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
   5758		     __func__, proc->pid, threads, nodes, incoming_refs,
   5759		     outgoing_refs, active_transactions);
   5760
   5761	binder_proc_dec_tmpref(proc);
   5762}
   5763
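        /*
         * All deferred work (flush, release) funnels through a single
         * workqueue item: binder_defer_work() below ORs the request into
         * proc->deferred_work and queues the proc at most once; this worker
         * then drains the deferred list one proc at a time.
         */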
   5764static void binder_deferred_func(struct work_struct *work)
   5765{
   5766	struct binder_proc *proc;
   5767
   5768	int defer;
   5769
   5770	do {
   5771		mutex_lock(&binder_deferred_lock);
   5772		if (!hlist_empty(&binder_deferred_list)) {
   5773			proc = hlist_entry(binder_deferred_list.first,
   5774					struct binder_proc, deferred_work_node);
   5775			hlist_del_init(&proc->deferred_work_node);
   5776			defer = proc->deferred_work;
   5777			proc->deferred_work = 0;
   5778		} else {
   5779			proc = NULL;
   5780			defer = 0;
   5781		}
   5782		mutex_unlock(&binder_deferred_lock);
   5783
   5784		if (defer & BINDER_DEFERRED_FLUSH)
   5785			binder_deferred_flush(proc);
   5786
   5787		if (defer & BINDER_DEFERRED_RELEASE)
   5788			binder_deferred_release(proc); /* frees proc */
   5789	} while (proc);
   5790}
   5791static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
   5792
   5793static void
   5794binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
   5795{
   5796	mutex_lock(&binder_deferred_lock);
   5797	proc->deferred_work |= defer;
   5798	if (hlist_unhashed(&proc->deferred_work_node)) {
   5799		hlist_add_head(&proc->deferred_work_node,
   5800				&binder_deferred_list);
   5801		schedule_work(&binder_deferred_work);
   5802	}
   5803	mutex_unlock(&binder_deferred_lock);
   5804}
   5805
   5806static void print_binder_transaction_ilocked(struct seq_file *m,
   5807					     struct binder_proc *proc,
   5808					     const char *prefix,
   5809					     struct binder_transaction *t)
   5810{
   5811	struct binder_proc *to_proc;
   5812	struct binder_buffer *buffer = t->buffer;
   5813
   5814	spin_lock(&t->lock);
   5815	to_proc = t->to_proc;
   5816	seq_printf(m,
   5817		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
   5818		   prefix, t->debug_id, t,
   5819		   t->from ? t->from->proc->pid : 0,
   5820		   t->from ? t->from->pid : 0,
   5821		   to_proc ? to_proc->pid : 0,
   5822		   t->to_thread ? t->to_thread->pid : 0,
   5823		   t->code, t->flags, t->priority, t->need_reply);
   5824	spin_unlock(&t->lock);
   5825
   5826	if (proc != to_proc) {
   5827		/*
   5828		 * Can only safely deref buffer if we are holding the
   5829		 * correct proc inner lock for this node
   5830		 */
   5831		seq_puts(m, "\n");
   5832		return;
   5833	}
   5834
   5835	if (buffer == NULL) {
   5836		seq_puts(m, " buffer free\n");
   5837		return;
   5838	}
   5839	if (buffer->target_node)
   5840		seq_printf(m, " node %d", buffer->target_node->debug_id);
   5841	seq_printf(m, " size %zd:%zd data %pK\n",
   5842		   buffer->data_size, buffer->offsets_size,
   5843		   buffer->user_data);
   5844}
   5845
   5846static void print_binder_work_ilocked(struct seq_file *m,
   5847				     struct binder_proc *proc,
   5848				     const char *prefix,
   5849				     const char *transaction_prefix,
   5850				     struct binder_work *w)
   5851{
   5852	struct binder_node *node;
   5853	struct binder_transaction *t;
   5854
   5855	switch (w->type) {
   5856	case BINDER_WORK_TRANSACTION:
   5857		t = container_of(w, struct binder_transaction, work);
   5858		print_binder_transaction_ilocked(
   5859				m, proc, transaction_prefix, t);
   5860		break;
   5861	case BINDER_WORK_RETURN_ERROR: {
   5862		struct binder_error *e = container_of(
   5863				w, struct binder_error, work);
   5864
   5865		seq_printf(m, "%stransaction error: %u\n",
   5866			   prefix, e->cmd);
   5867	} break;
   5868	case BINDER_WORK_TRANSACTION_COMPLETE:
   5869		seq_printf(m, "%stransaction complete\n", prefix);
   5870		break;
   5871	case BINDER_WORK_NODE:
   5872		node = container_of(w, struct binder_node, work);
   5873		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
   5874			   prefix, node->debug_id,
   5875			   (u64)node->ptr, (u64)node->cookie);
   5876		break;
   5877	case BINDER_WORK_DEAD_BINDER:
   5878		seq_printf(m, "%shas dead binder\n", prefix);
   5879		break;
   5880	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
   5881		seq_printf(m, "%shas cleared dead binder\n", prefix);
   5882		break;
   5883	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
   5884		seq_printf(m, "%shas cleared death notification\n", prefix);
   5885		break;
   5886	default:
   5887		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
   5888		break;
   5889	}
   5890}
   5891
   5892static void print_binder_thread_ilocked(struct seq_file *m,
   5893					struct binder_thread *thread,
   5894					int print_always)
   5895{
   5896	struct binder_transaction *t;
   5897	struct binder_work *w;
   5898	size_t start_pos = m->count;
   5899	size_t header_pos;
   5900
   5901	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
   5902			thread->pid, thread->looper,
   5903			thread->looper_need_return,
   5904			atomic_read(&thread->tmp_ref));
   5905	header_pos = m->count;
   5906	t = thread->transaction_stack;
   5907	while (t) {
   5908		if (t->from == thread) {
   5909			print_binder_transaction_ilocked(m, thread->proc,
   5910					"    outgoing transaction", t);
   5911			t = t->from_parent;
   5912		} else if (t->to_thread == thread) {
   5913			print_binder_transaction_ilocked(m, thread->proc,
   5914						 "    incoming transaction", t);
   5915			t = t->to_parent;
   5916		} else {
   5917			print_binder_transaction_ilocked(m, thread->proc,
   5918					"    bad transaction", t);
   5919			t = NULL;
   5920		}
   5921	}
   5922	list_for_each_entry(w, &thread->todo, entry) {
   5923		print_binder_work_ilocked(m, thread->proc, "    ",
   5924					  "    pending transaction", w);
   5925	}
   5926	if (!print_always && m->count == header_pos)
   5927		m->count = start_pos;
   5928}
   5929
   5930static void print_binder_node_nilocked(struct seq_file *m,
   5931				       struct binder_node *node)
   5932{
   5933	struct binder_ref *ref;
   5934	struct binder_work *w;
   5935	int count;
   5936
   5937	count = 0;
   5938	hlist_for_each_entry(ref, &node->refs, node_entry)
   5939		count++;
   5940
   5941	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
   5942		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
   5943		   node->has_strong_ref, node->has_weak_ref,
   5944		   node->local_strong_refs, node->local_weak_refs,
   5945		   node->internal_strong_refs, count, node->tmp_refs);
   5946	if (count) {
   5947		seq_puts(m, " proc");
   5948		hlist_for_each_entry(ref, &node->refs, node_entry)
   5949			seq_printf(m, " %d", ref->proc->pid);
   5950	}
   5951	seq_puts(m, "\n");
   5952	if (node->proc) {
   5953		list_for_each_entry(w, &node->async_todo, entry)
   5954			print_binder_work_ilocked(m, node->proc, "    ",
   5955					  "    pending async transaction", w);
   5956	}
   5957}
   5958
   5959static void print_binder_ref_olocked(struct seq_file *m,
   5960				     struct binder_ref *ref)
   5961{
   5962	binder_node_lock(ref->node);
   5963	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
   5964		   ref->data.debug_id, ref->data.desc,
   5965		   ref->node->proc ? "" : "dead ",
   5966		   ref->node->debug_id, ref->data.strong,
   5967		   ref->data.weak, ref->death);
   5968	binder_node_unlock(ref->node);
   5969}
   5970
   5971static void print_binder_proc(struct seq_file *m,
   5972			      struct binder_proc *proc, int print_all)
   5973{
   5974	struct binder_work *w;
   5975	struct rb_node *n;
   5976	size_t start_pos = m->count;
   5977	size_t header_pos;
   5978	struct binder_node *last_node = NULL;
   5979
   5980	seq_printf(m, "proc %d\n", proc->pid);
   5981	seq_printf(m, "context %s\n", proc->context->name);
   5982	header_pos = m->count;
   5983
   5984	binder_inner_proc_lock(proc);
   5985	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
   5986		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
   5987						rb_node), print_all);
   5988
   5989	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
   5990		struct binder_node *node = rb_entry(n, struct binder_node,
   5991						    rb_node);
   5992		if (!print_all && !node->has_async_transaction)
   5993			continue;
   5994
   5995		/*
   5996		 * take a temporary reference on the node so it
   5997		 * survives and isn't removed from the tree
   5998		 * while we print it.
   5999		 */
   6000		binder_inc_node_tmpref_ilocked(node);
   6001		/* Need to drop inner lock to take node lock */
   6002		binder_inner_proc_unlock(proc);
   6003		if (last_node)
   6004			binder_put_node(last_node);
   6005		binder_node_inner_lock(node);
   6006		print_binder_node_nilocked(m, node);
   6007		binder_node_inner_unlock(node);
   6008		last_node = node;
   6009		binder_inner_proc_lock(proc);
   6010	}
   6011	binder_inner_proc_unlock(proc);
   6012	if (last_node)
   6013		binder_put_node(last_node);
   6014
   6015	if (print_all) {
   6016		binder_proc_lock(proc);
   6017		for (n = rb_first(&proc->refs_by_desc);
   6018		     n != NULL;
   6019		     n = rb_next(n))
   6020			print_binder_ref_olocked(m, rb_entry(n,
   6021							    struct binder_ref,
   6022							    rb_node_desc));
   6023		binder_proc_unlock(proc);
   6024	}
   6025	binder_alloc_print_allocated(m, &proc->alloc);
   6026	binder_inner_proc_lock(proc);
   6027	list_for_each_entry(w, &proc->todo, entry)
   6028		print_binder_work_ilocked(m, proc, "  ",
   6029					  "  pending transaction", w);
   6030	list_for_each_entry(w, &proc->delivered_death, entry) {
   6031		seq_puts(m, "  has delivered dead binder\n");
   6032		break;
   6033	}
   6034	binder_inner_proc_unlock(proc);
   6035	if (!print_all && m->count == header_pos)
   6036		m->count = start_pos;
   6037}
   6038
   6039static const char * const binder_return_strings[] = {
   6040	"BR_ERROR",
   6041	"BR_OK",
   6042	"BR_TRANSACTION",
   6043	"BR_REPLY",
   6044	"BR_ACQUIRE_RESULT",
   6045	"BR_DEAD_REPLY",
   6046	"BR_TRANSACTION_COMPLETE",
   6047	"BR_INCREFS",
   6048	"BR_ACQUIRE",
   6049	"BR_RELEASE",
   6050	"BR_DECREFS",
   6051	"BR_ATTEMPT_ACQUIRE",
   6052	"BR_NOOP",
   6053	"BR_SPAWN_LOOPER",
   6054	"BR_FINISHED",
   6055	"BR_DEAD_BINDER",
   6056	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
   6057	"BR_FAILED_REPLY",
   6058	"BR_FROZEN_REPLY",
   6059	"BR_ONEWAY_SPAM_SUSPECT",
   6060};
   6061
   6062static const char * const binder_command_strings[] = {
   6063	"BC_TRANSACTION",
   6064	"BC_REPLY",
   6065	"BC_ACQUIRE_RESULT",
   6066	"BC_FREE_BUFFER",
   6067	"BC_INCREFS",
   6068	"BC_ACQUIRE",
   6069	"BC_RELEASE",
   6070	"BC_DECREFS",
   6071	"BC_INCREFS_DONE",
   6072	"BC_ACQUIRE_DONE",
   6073	"BC_ATTEMPT_ACQUIRE",
   6074	"BC_REGISTER_LOOPER",
   6075	"BC_ENTER_LOOPER",
   6076	"BC_EXIT_LOOPER",
   6077	"BC_REQUEST_DEATH_NOTIFICATION",
   6078	"BC_CLEAR_DEATH_NOTIFICATION",
   6079	"BC_DEAD_BINDER_DONE",
   6080	"BC_TRANSACTION_SG",
   6081	"BC_REPLY_SG",
   6082};
   6083
   6084static const char * const binder_objstat_strings[] = {
   6085	"proc",
   6086	"thread",
   6087	"node",
   6088	"ref",
   6089	"death",
   6090	"transaction",
   6091	"transaction_complete"
   6092};
   6093
   6094static void print_binder_stats(struct seq_file *m, const char *prefix,
   6095			       struct binder_stats *stats)
   6096{
   6097	int i;
   6098
   6099	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
   6100		     ARRAY_SIZE(binder_command_strings));
   6101	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
   6102		int temp = atomic_read(&stats->bc[i]);
   6103
   6104		if (temp)
   6105			seq_printf(m, "%s%s: %d\n", prefix,
   6106				   binder_command_strings[i], temp);
   6107	}
   6108
   6109	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
   6110		     ARRAY_SIZE(binder_return_strings));
   6111	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
   6112		int temp = atomic_read(&stats->br[i]);
   6113
   6114		if (temp)
   6115			seq_printf(m, "%s%s: %d\n", prefix,
   6116				   binder_return_strings[i], temp);
   6117	}
   6118
   6119	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
   6120		     ARRAY_SIZE(binder_objstat_strings));
   6121	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
   6122		     ARRAY_SIZE(stats->obj_deleted));
   6123	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
   6124		int created = atomic_read(&stats->obj_created[i]);
   6125		int deleted = atomic_read(&stats->obj_deleted[i]);
   6126
   6127		if (created || deleted)
   6128			seq_printf(m, "%s%s: active %d total %d\n",
   6129				prefix,
   6130				binder_objstat_strings[i],
   6131				created - deleted,
   6132				created);
   6133	}
   6134}
   6135
   6136static void print_binder_proc_stats(struct seq_file *m,
   6137				    struct binder_proc *proc)
   6138{
   6139	struct binder_work *w;
   6140	struct binder_thread *thread;
   6141	struct rb_node *n;
   6142	int count, strong, weak, ready_threads;
   6143	size_t free_async_space =
   6144		binder_alloc_get_free_async_space(&proc->alloc);
   6145
   6146	seq_printf(m, "proc %d\n", proc->pid);
   6147	seq_printf(m, "context %s\n", proc->context->name);
   6148	count = 0;
   6149	ready_threads = 0;
   6150	binder_inner_proc_lock(proc);
   6151	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
   6152		count++;
   6153
   6154	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
   6155		ready_threads++;
   6156
   6157	seq_printf(m, "  threads: %d\n", count);
   6158	seq_printf(m, "  requested threads: %d+%d/%d\n"
   6159			"  ready threads %d\n"
   6160			"  free async space %zd\n", proc->requested_threads,
   6161			proc->requested_threads_started, proc->max_threads,
   6162			ready_threads,
   6163			free_async_space);
   6164	count = 0;
   6165	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
   6166		count++;
   6167	binder_inner_proc_unlock(proc);
   6168	seq_printf(m, "  nodes: %d\n", count);
   6169	count = 0;
   6170	strong = 0;
   6171	weak = 0;
   6172	binder_proc_lock(proc);
   6173	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
   6174		struct binder_ref *ref = rb_entry(n, struct binder_ref,
   6175						  rb_node_desc);
   6176		count++;
   6177		strong += ref->data.strong;
   6178		weak += ref->data.weak;
   6179	}
   6180	binder_proc_unlock(proc);
   6181	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
   6182
   6183	count = binder_alloc_get_allocated_count(&proc->alloc);
   6184	seq_printf(m, "  buffers: %d\n", count);
   6185
   6186	binder_alloc_print_pages(m, &proc->alloc);
   6187
   6188	count = 0;
   6189	binder_inner_proc_lock(proc);
   6190	list_for_each_entry(w, &proc->todo, entry) {
   6191		if (w->type == BINDER_WORK_TRANSACTION)
   6192			count++;
   6193	}
   6194	binder_inner_proc_unlock(proc);
   6195	seq_printf(m, "  pending transactions: %d\n", count);
   6196
   6197	print_binder_stats(m, "  ", &proc->stats);
   6198}
   6199
   6200
   6201int binder_state_show(struct seq_file *m, void *unused)
   6202{
   6203	struct binder_proc *proc;
   6204	struct binder_node *node;
   6205	struct binder_node *last_node = NULL;
   6206
   6207	seq_puts(m, "binder state:\n");
   6208
   6209	spin_lock(&binder_dead_nodes_lock);
   6210	if (!hlist_empty(&binder_dead_nodes))
   6211		seq_puts(m, "dead nodes:\n");
   6212	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
   6213		/*
   6214		 * take a temporary reference on the node so it
   6215		 * survives and isn't removed from the list
   6216		 * while we print it.
   6217		 */
   6218		node->tmp_refs++;
   6219		spin_unlock(&binder_dead_nodes_lock);
   6220		if (last_node)
   6221			binder_put_node(last_node);
   6222		binder_node_lock(node);
   6223		print_binder_node_nilocked(m, node);
   6224		binder_node_unlock(node);
   6225		last_node = node;
   6226		spin_lock(&binder_dead_nodes_lock);
   6227	}
   6228	spin_unlock(&binder_dead_nodes_lock);
   6229	if (last_node)
   6230		binder_put_node(last_node);
   6231
   6232	mutex_lock(&binder_procs_lock);
   6233	hlist_for_each_entry(proc, &binder_procs, proc_node)
   6234		print_binder_proc(m, proc, 1);
   6235	mutex_unlock(&binder_procs_lock);
   6236
   6237	return 0;
   6238}
   6239
   6240int binder_stats_show(struct seq_file *m, void *unused)
   6241{
   6242	struct binder_proc *proc;
   6243
   6244	seq_puts(m, "binder stats:\n");
   6245
   6246	print_binder_stats(m, "", &binder_stats);
   6247
   6248	mutex_lock(&binder_procs_lock);
   6249	hlist_for_each_entry(proc, &binder_procs, proc_node)
   6250		print_binder_proc_stats(m, proc);
   6251	mutex_unlock(&binder_procs_lock);
   6252
   6253	return 0;
   6254}
   6255
   6256int binder_transactions_show(struct seq_file *m, void *unused)
   6257{
   6258	struct binder_proc *proc;
   6259
   6260	seq_puts(m, "binder transactions:\n");
   6261	mutex_lock(&binder_procs_lock);
   6262	hlist_for_each_entry(proc, &binder_procs, proc_node)
   6263		print_binder_proc(m, proc, 0);
   6264	mutex_unlock(&binder_procs_lock);
   6265
   6266	return 0;
   6267}
   6268
   6269static int proc_show(struct seq_file *m, void *unused)
   6270{
   6271	struct binder_proc *itr;
   6272	int pid = (unsigned long)m->private;
   6273
   6274	mutex_lock(&binder_procs_lock);
   6275	hlist_for_each_entry(itr, &binder_procs, proc_node) {
   6276		if (itr->pid == pid) {
   6277			seq_puts(m, "binder proc state:\n");
   6278			print_binder_proc(m, itr, 1);
   6279		}
   6280	}
   6281	mutex_unlock(&binder_procs_lock);
   6282
   6283	return 0;
   6284}
   6285
   6286static void print_binder_transaction_log_entry(struct seq_file *m,
   6287					struct binder_transaction_log_entry *e)
   6288{
   6289	int debug_id = READ_ONCE(e->debug_id_done);
   6290	/*
   6291	 * read barrier to guarantee debug_id_done read before
   6292	 * we print the log values
   6293	 */
   6294	smp_rmb();
   6295	seq_printf(m,
   6296		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
   6297		   e->debug_id, (e->call_type == 2) ? "reply" :
   6298		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
   6299		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
   6300		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
   6301		   e->return_error, e->return_error_param,
   6302		   e->return_error_line);
   6303	/*
    6304	 * read barrier to guarantee debug_id_done is re-read only after
    6305	 * we are done printing the fields of the entry
   6306	 */
   6307	smp_rmb();
   6308	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
   6309			"\n" : " (incomplete)\n");
   6310}
   6311
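        /*
         * Dump the transaction log ring oldest-first. log->cur is the slot
         * written most recently. Worked example, assuming a 32-slot ring
         * that has wrapped (log->full set) with cur == 5: count becomes 32
         * and the walk starts at slot 6, visiting 6..31 and then 0..5,
         * oldest to newest.
         */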
   6312int binder_transaction_log_show(struct seq_file *m, void *unused)
   6313{
   6314	struct binder_transaction_log *log = m->private;
   6315	unsigned int log_cur = atomic_read(&log->cur);
   6316	unsigned int count;
   6317	unsigned int cur;
   6318	int i;
   6319
   6320	count = log_cur + 1;
   6321	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
   6322		0 : count % ARRAY_SIZE(log->entry);
   6323	if (count > ARRAY_SIZE(log->entry) || log->full)
   6324		count = ARRAY_SIZE(log->entry);
   6325	for (i = 0; i < count; i++) {
   6326		unsigned int index = cur++ % ARRAY_SIZE(log->entry);
   6327
   6328		print_binder_transaction_log_entry(m, &log->entry[index]);
   6329	}
   6330	return 0;
   6331}
   6332
   6333const struct file_operations binder_fops = {
   6334	.owner = THIS_MODULE,
   6335	.poll = binder_poll,
   6336	.unlocked_ioctl = binder_ioctl,
   6337	.compat_ioctl = compat_ptr_ioctl,
   6338	.mmap = binder_mmap,
   6339	.open = binder_open,
   6340	.flush = binder_flush,
   6341	.release = binder_release,
   6342};
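
        /*
         * Userspace reaches these handlers through the usual character
         * device sequence; a minimal sketch (hypothetical stand-alone
         * client, error handling omitted, same calls libbinder makes):
         *
         *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC); -> binder_open()
         *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
         *			 MAP_PRIVATE, fd, 0);             -> binder_mmap()
         *	struct binder_write_read bwr = {0};
         *	... point bwr.write_buffer/read_buffer at buffers ...
         *	ioctl(fd, BINDER_WRITE_READ, &bwr);               -> binder_ioctl()
         */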
   6343
   6344static int __init init_binder_device(const char *name)
   6345{
   6346	int ret;
   6347	struct binder_device *binder_device;
   6348
   6349	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
   6350	if (!binder_device)
   6351		return -ENOMEM;
   6352
   6353	binder_device->miscdev.fops = &binder_fops;
   6354	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
   6355	binder_device->miscdev.name = name;
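        	/*
        	 * MISC_DYNAMIC_MINOR asks the misc core to pick a free minor
        	 * when misc_register() runs; devtmpfs/udev then create the
        	 * /dev/<name> node for it.
        	 */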
   6356
   6357	refcount_set(&binder_device->ref, 1);
   6358	binder_device->context.binder_context_mgr_uid = INVALID_UID;
   6359	binder_device->context.name = name;
   6360	mutex_init(&binder_device->context.context_mgr_node_lock);
   6361
   6362	ret = misc_register(&binder_device->miscdev);
   6363	if (ret < 0) {
   6364		kfree(binder_device);
   6365		return ret;
   6366	}
   6367
   6368	hlist_add_head(&binder_device->hlist, &binder_devices);
   6369
   6370	return ret;
   6371}
   6372
   6373static int __init binder_init(void)
   6374{
   6375	int ret;
   6376	char *device_name, *device_tmp;
   6377	struct binder_device *device;
   6378	struct hlist_node *tmp;
   6379	char *device_names = NULL;
   6380
   6381	ret = binder_alloc_shrinker_init();
   6382	if (ret)
   6383		return ret;
   6384
   6385	atomic_set(&binder_transaction_log.cur, ~0U);
   6386	atomic_set(&binder_transaction_log_failed.cur, ~0U);
   6387
   6388	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
    6389	if (binder_debugfs_dir_entry_root) {
    6390		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
    6391						 binder_debugfs_dir_entry_root);
    6392
    6393		/* all binder debugfs files live under the same "binder" root */
   6394		debugfs_create_file("state",
   6395				    0444,
   6396				    binder_debugfs_dir_entry_root,
   6397				    NULL,
   6398				    &binder_state_fops);
   6399		debugfs_create_file("stats",
   6400				    0444,
   6401				    binder_debugfs_dir_entry_root,
   6402				    NULL,
   6403				    &binder_stats_fops);
   6404		debugfs_create_file("transactions",
   6405				    0444,
   6406				    binder_debugfs_dir_entry_root,
   6407				    NULL,
   6408				    &binder_transactions_fops);
   6409		debugfs_create_file("transaction_log",
   6410				    0444,
   6411				    binder_debugfs_dir_entry_root,
   6412				    &binder_transaction_log,
   6413				    &binder_transaction_log_fops);
   6414		debugfs_create_file("failed_transaction_log",
   6415				    0444,
   6416				    binder_debugfs_dir_entry_root,
   6417				    &binder_transaction_log_failed,
   6418				    &binder_transaction_log_fops);
   6419	}
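        	/*
        	 * With debugfs mounted at the usual /sys/kernel/debug, the
        	 * files above appear under binder/ as state, stats,
        	 * transactions, transaction_log and failed_transaction_log;
        	 * per-process state later shows up under binder/proc/.
        	 */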
   6420
   6421	if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
   6422	    strcmp(binder_devices_param, "") != 0) {
    6423		/*
    6424		 * Copy the module parameter string, because we don't want to
    6425		 * tokenize it in-place.
    6426		 */
   6427		device_names = kstrdup(binder_devices_param, GFP_KERNEL);
   6428		if (!device_names) {
   6429			ret = -ENOMEM;
   6430			goto err_alloc_device_names_failed;
   6431		}
   6432
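        		/*
        		 * E.g. the default CONFIG_ANDROID_BINDER_DEVICES string
        		 * "binder,hwbinder,vndbinder" registers three misc
        		 * devices: /dev/binder, /dev/hwbinder and /dev/vndbinder.
        		 */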
   6433		device_tmp = device_names;
   6434		while ((device_name = strsep(&device_tmp, ","))) {
   6435			ret = init_binder_device(device_name);
   6436			if (ret)
   6437				goto err_init_binder_device_failed;
   6438		}
   6439	}
   6440
   6441	ret = init_binderfs();
   6442	if (ret)
   6443		goto err_init_binder_device_failed;
   6444
   6445	return ret;
   6446
   6447err_init_binder_device_failed:
   6448	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
   6449		misc_deregister(&device->miscdev);
   6450		hlist_del(&device->hlist);
   6451		kfree(device);
   6452	}
   6453
   6454	kfree(device_names);
   6455
   6456err_alloc_device_names_failed:
   6457	debugfs_remove_recursive(binder_debugfs_dir_entry_root);
   6458
   6459	return ret;
   6460}
   6461
   6462device_initcall(binder_init);
   6463
   6464#define CREATE_TRACE_POINTS
   6465#include "binder_trace.h"
   6466
   6467MODULE_LICENSE("GPL v2");