/* * Locking overview * * There are 3 main spinlocks which must be acquired in the * order shown: * * 1) proc->outer_lock : protects binder_ref * binder_proc_lock() and binder_proc_unlock() are * used to acq/rel. * 2) node->lock : protects most fields of binder_node. * binder_node_lock() and binder_node_unlock() are * used to acq/rel * 3) proc->inner_lock : protects the thread and node lists * (proc->threads, proc->waiting_threads, proc->nodes) * and all todo lists associated with the binder_proc * (proc->todo, thread->todo, proc->delivered_death and * node->async_todo), as well as thread->transaction_stack * binder_inner_proc_lock() and binder_inner_proc_unlock() * are used to acq/rel * * Any lock under procA must never be nested under any lock at the same * level or below on procB. * * Functions that require a lock held on entry indicate which lock * in the suffix of the function name: * * foo_olocked() : requires node->outer_lock * foo_nlocked() : requires node->lock * foo_ilocked() : requires proc->inner_lock * foo_oilocked(): requires proc->outer_lock and proc->inner_lock * foo_nilocked(): requires node->lock and proc->inner_lock * ...
*/
/**
 * struct binder_transaction_log_entry - one record of a logged transaction
 *
 * @debug_id_done is written (via WRITE_ONCE) around reuse of an entry so
 * readers can detect records that are still being filled in.
 */
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};
if (cur >= ARRAY_SIZE(log->entry))
log->full = true;
e = &log->entry[cur % ARRAY_SIZE(log->entry)];
WRITE_ONCE(e->debug_id_done, 0); /* * write-barrier to synchronize access to e->debug_id_done. * We make sure the initialized 0 value is seen before * memset() other fields are zeroed by memset.
*/
smp_wmb();
memset(e, 0, sizeof(*e)); return e;
}
/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Takes and releases @proc->inner_lock around the check.
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool empty;

	binder_inner_proc_lock(proc);
	empty = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);

	return empty;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:		struct binder_work to add to list
 * @target_list:	list to add work to
 *
 * Appends @work to @target_list. Asserts that @work is not
 * already queued on some other list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(!target_list);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Queues @work on @thread->todo without setting the process_todo
 * flag: if that flag wasn't already set, the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
/** * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list * @thread: thread to queue work to * @work: struct binder_work to add to list * * Adds the work to the todo list of the thread, and enables processing * of the todo queue. * * Requires the proc->inner_lock to be held.
*/ staticvoid
binder_enqueue_thread_work_ilocked(struct binder_thread *thread, struct binder_work *work)
{
WARN_ON(!list_empty(&thread->waiting_thread_node));
binder_enqueue_work_ilocked(work, &thread->todo);
/* (e)poll-based threads require an explicit wakeup signal when * queuing their own work; they rely on these events to consume * messages without I/O block. Without it, threads risk waiting * indefinitely without handling the work.
*/ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
thread->pid == current->pid && !thread->process_todo)
wake_up_interruptible_sync(&thread->wait);
thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Locked wrapper: takes @thread->proc->inner_lock, queues @work on
 * the thread's todo list and enables processing of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	struct binder_proc *proc = thread->proc;

	binder_inner_proc_lock(proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(proc);
}
/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes @work from whatever list it is on, under @proc->inner_lock.
 * Safe to call even if @work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
thread = rb_entry(n, struct binder_thread, rb_node); if (thread->looper & BINDER_LOOPER_STATE_POLL &&
binder_available_for_proc_work_ilocked(thread)) { if (sync)
wake_up_interruptible_sync(&thread->wait); else
wake_up_interruptible(&thread->wait);
}
}
}
/** * binder_select_thread_ilocked() - selects a thread for doing proc work. * @proc: process to select a thread from * * Note that calling this function moves the thread off the waiting_threads * list, so it can only be woken up by the caller of this function, or a * signal. Therefore, callers *should* always wake up the thread this function * returns. * * Return: If there's a thread currently waiting for process work, * returns that thread. Otherwise returns NULL.
*/ staticstruct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{ struct binder_thread *thread;
if (thread)
list_del_init(&thread->waiting_thread_node);
return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
if (!new_node) return NULL;
binder_inner_proc_lock(proc);
node = binder_init_node_ilocked(proc, new_node, fp);
binder_inner_proc_unlock(proc); if (node != new_node) /* * The node was already added by another thread
*/
kfree(new_node);
/* Bump the node's temporary refcount. No binder_inc_node() call is
 * needed here: userspace is never informed of tmp_refs changes.
 * Caller must hold the appropriate lock (see binder_inc_node_tmpref()).
 */
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	/* node->proc is stable while node->lock is held */
	if (node->proc) {
		binder_inner_proc_lock(node->proc);
		binder_inc_node_tmpref_ilocked(node);
		binder_inner_proc_unlock(node->proc);
	} else {
		spin_lock(&binder_dead_nodes_lock);
		binder_inc_node_tmpref_ilocked(node);
		spin_unlock(&binder_dead_nodes_lock);
	}
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool should_free;

	binder_node_inner_lock(node);
	/* Dead nodes are protected by binder_dead_nodes_lock instead of
	 * the proc inner lock; the __acquire/__release annotations keep
	 * sparse's lock balance happy on the live-node path.
	 */
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	should_free = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (should_free)
		binder_free_node(node);
}
desc = offset; for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc); if (ref->data.desc > desc) break;
desc = ref->data.desc + 1;
}
return desc;
}
/*
 * Find an available reference descriptor ID. The proc->outer_lock might
 * be released in the process, in which case -EAGAIN is returned and the
 * @desc should be considered invalid.
 */
static int get_ref_desc_olocked(struct binder_proc *proc,
				struct binder_node *node,
				u32 *desc)
{
	struct dbitmap *dmap = &proc->dmap;
	unsigned int nbits, offset;
	unsigned long *new, bit;

	/* 0 is reserved for the context manager */
	offset = (node == proc->context->binder_context_mgr_node) ? 0 : 1;

	if (!dbitmap_enabled(dmap)) {
		*desc = slow_desc_lookup_olocked(proc, offset);
		return 0;
	}

	/* BUG FIX: the fast path was missing, so 'bit' was never used and
	 * every call with the dbitmap enabled grew the bitmap and returned
	 * -EAGAIN, making callers retry forever. Try to grab a free bit
	 * first; only grow when the bitmap is actually full.
	 */
	if (dbitmap_acquire_next_zero_bit(dmap, offset, &bit) == 0) {
		*desc = bit;
		return 0;
	}

	/*
	 * The dbitmap is full and needs to grow. The proc->outer_lock
	 * is briefly released to allocate the new bitmap safely.
	 */
	nbits = dbitmap_grow_nbits(dmap);
	binder_proc_unlock(proc);
	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_grow(dmap, new, nbits);

	return -EAGAIN;
}
/** * binder_get_ref_for_node_olocked() - get the ref associated with given node * @proc: binder_proc that owns the ref * @node: binder_node of target * @new_ref: newly allocated binder_ref to be initialized or %NULL * * Look up the ref for the given node and return it if it exists * * If it doesn't exist and the caller provides a newly allocated * ref, initialize the fields of the newly allocated ref and insert * into the given proc rb_trees and node refs list. * * Return: the ref for node. It is possible that another thread * allocated/initialized the ref first in which case the * returned ref would be different than the passed-in * new_ref. new_ref must be kfree'd by the caller in * this case.
*/ staticstruct binder_ref *binder_get_ref_for_node_olocked( struct binder_proc *proc, struct binder_node *node, struct binder_ref *new_ref)
{ struct binder_ref *ref; struct rb_node *parent; struct rb_node **p;
u32 desc;
retry:
p = &proc->refs_by_node.rb_node;
parent = NULL; while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_node);
if (node < ref->node)
p = &(*p)->rb_left; elseif (node > ref->node)
p = &(*p)->rb_right; else return ref;
} if (!new_ref) return NULL;
/* might release the proc->outer_lock */ if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN) goto retry;
if (dbitmap_enabled(dmap))
dbitmap_clear_bit(dmap, ref->data.desc);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
binder_node_inner_lock(ref->node); if (ref->data.strong)
binder_dec_node_nilocked(ref->node, 1, 1);
hlist_del(&ref->node_entry);
delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
binder_node_inner_unlock(ref->node); /* * Clear ref->node unless we want the caller to free the node
*/ if (!delete_node) { /* * The caller uses ref->node to determine * whether the node needs to be freed. Clear * it since the node is still alive.
*/
ref->node = NULL;
}
if (ref->death) {
binder_debug(BINDER_DEBUG_DEAD_BINDER, "%d delete ref %d desc %d has death notification\n",
ref->proc->pid, ref->data.debug_id,
ref->data.desc);
binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
if (ref->freeze) {
binder_dequeue_work(ref->proc, &ref->freeze->work);
binder_stats_deleted(BINDER_STAT_FREEZE);
}
binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:		ref to be incremented
 * @strong:		if true, strong increment, else weak
 * @target_list:	list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int err;

	if (strong) {
		/* first strong ref also takes a strong ref on the node */
		if (!ref->data.strong) {
			err = binder_inc_node(ref->node, 1, 1, target_list);
			if (err)
				return err;
		}
		ref->data.strong++;
	} else {
		/* first weak ref also takes a weak ref on the node */
		if (!ref->data.weak) {
			err = binder_inc_node(ref->node, 0, 1, target_list);
			if (err)
				return err;
		}
		ref->data.weak++;
	}

	return 0;
}
/** * binder_dec_ref_olocked() - dec the ref for given handle * @ref: ref to be decremented * @strong: if true, strong decrement, else weak * * Decrement the ref. * * Return: %true if ref is cleaned up and ready to be freed.
*/ staticbool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{ if (strong) { if (ref->data.strong == 0) {
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
ref->proc->pid, ref->data.debug_id,
ref->data.desc, ref->data.strong,
ref->data.weak); returnfalse;
}
ref->data.strong--; if (ref->data.strong == 0)
binder_dec_node(ref->node, strong, 1);
} else { if (ref->data.weak == 0) {
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
ref->proc->pid, ref->data.debug_id,
ref->data.desc, ref->data.strong,
ref->data.weak); returnfalse;
}
ref->data.weak--;
} if (ref->data.strong == 0 && ref->data.weak == 0) {
binder_cleanup_ref_olocked(ref); returntrue;
} returnfalse;
}
/** * binder_get_node_from_ref() - get the node from the given proc/desc * @proc: proc containing the ref * @desc: the handle associated with the ref * @need_strong_ref: if true, only return node if ref is strong * @rdata: the id/refcount data for the ref * * Given a proc and ref handle, return the associated binder_node * * Return: a binder_node or NULL if not found or not strong when strong required
*/ staticstruct binder_node *binder_get_node_from_ref( struct binder_proc *proc,
u32 desc, bool need_strong_ref, struct binder_ref_data *rdata)
{ struct binder_node *node; struct binder_ref *ref;
binder_proc_lock(proc);
ref = binder_get_ref_olocked(proc, desc, need_strong_ref); if (!ref) goto err_no_ref;
node = ref->node; /* * Take an implicit reference on the node to ensure * it stays alive until the call to binder_put_node()
*/
binder_inc_node_tmpref(node); if (rdata)
*rdata = ref->data;
binder_proc_unlock(proc);
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	struct binder_node *node = ref->node;

	if (node)
		binder_free_node(node);
	/* kfree(NULL) is a no-op, so death/freeze need no guards */
	kfree(ref->death);
	kfree(ref->freeze);
	kfree(ref);
}
/* shrink descriptor bitmap if needed
 *
 * BUG FIX: 'nbits' was passed to bitmap_zalloc() uninitialized. Compute
 * the target size under the lock first, and bail out early when no
 * shrink is needed (nbits == 0). The allocation itself happens outside
 * the lock since it may sleep (GFP_KERNEL).
 */
static void try_shrink_dmap(struct binder_proc *proc)
{
	unsigned long *new;
	int nbits;

	binder_proc_lock(proc);
	nbits = dbitmap_shrink_nbits(&proc->dmap);
	binder_proc_unlock(proc);

	if (!nbits)
		return;

	new = bitmap_zalloc(nbits, GFP_KERNEL);
	binder_proc_lock(proc);
	dbitmap_shrink(&proc->dmap, new, nbits);
	binder_proc_unlock(proc);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	/* ref was unlinked by binder_dec_ref_olocked(); free it and see
	 * if the descriptor bitmap can shrink now.
	 */
	if (delete_ref) {
		binder_free_ref(ref);
		try_shrink_dmap(proc);
	}
	return ret;

/* BUG FIX: the err_no_ref label and closing brace were missing, leaving
 * an unresolved goto and an unterminated function.
 */
err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Thin wrapper around binder_update_ref_for_handle() with
 * increment=false.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false,
					    strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *new_ref = NULL;
	struct binder_ref *ref;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		/* No existing ref: allocate one outside the lock (may
		 * sleep) and retry the lookup with it.
		 */
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);

	return ret;
}
/** * binder_thread_dec_tmpref() - decrement thread->tmp_ref * @thread: thread to decrement * * A thread needs to be kept alive while being used to create or * handle a transaction. binder_get_txn_from() is used to safely * extract t->from from a binder_transaction and keep the thread * indicated by t->from from being freed. When done with that * binder_thread, this function is called to decrement the * tmp_ref and free if appropriate (thread has been released * and no transaction being processed by the driver)
*/ staticvoid binder_thread_dec_tmpref(struct binder_thread *thread)
{ /* * atomic is used to protect the counter value while * it cannot reach zero or thread->is_dead is false
*/
binder_inner_proc_lock(thread->proc);
atomic_dec(&thread->tmp_ref); if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
binder_inner_proc_unlock(thread->proc);
binder_free_thread(thread); return;
}
binder_inner_proc_unlock(thread->proc);
}
/** * binder_proc_dec_tmpref() - decrement proc->tmp_ref * @proc: proc to decrement * * A binder_proc needs to be kept alive while being used to create or * handle a transaction. proc->tmp_ref is incremented when * creating a new transaction or the binder_proc is currently in-use * by threads that are being released. When done with the binder_proc, * this function is called to decrement the counter and free the * proc if appropriate (proc has been released, all threads have * been released and not currently in-use to process a transaction).
*/ staticvoid binder_proc_dec_tmpref(struct binder_proc *proc)
{
binder_inner_proc_lock(proc);
proc->tmp_ref--; if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
!proc->tmp_ref) {
binder_inner_proc_unlock(proc);
binder_free_proc(proc); return;
}
binder_inner_proc_unlock(proc);
}
/** * binder_get_txn_from() - safely extract the "from" thread in transaction * @t: binder transaction for t->from * * Atomically return the "from" thread and increment the tmp_ref * count for the thread to ensure it stays alive until * binder_thread_dec_tmpref() is called. * * Return: the value of t->from
*/ staticstruct binder_thread *binder_get_txn_from( struct binder_transaction *t)
{ struct binder_thread *from;
guard(spinlock)(&t->lock);
from = t->from; if (from)
atomic_inc(&from->tmp_ref); return from;
}
/** * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock * @t: binder transaction for t->from * * Same as binder_get_txn_from() except it also acquires the proc->inner_lock * to guarantee that the thread cannot be released while operating on it. * The caller must call binder_inner_proc_unlock() to release the inner lock * as well as call binder_dec_thread_txn() to release the reference. * * Return: the value of t->from
*/ staticstruct binder_thread *binder_get_txn_from_and_acq_inner( struct binder_transaction *t)
__acquires(&t->from->proc->inner_lock)
{ struct binder_thread *from;
from = binder_get_txn_from(t); if (!from) {
__acquire(&from->proc->inner_lock); return NULL;
}
binder_inner_proc_lock(from->proc); if (t->from) {
BUG_ON(from != t->from); return from;
}
binder_inner_proc_unlock(from->proc);
__acquire(&from->proc->inner_lock);
binder_thread_dec_tmpref(from); return NULL;
}
/** * binder_free_txn_fixups() - free unprocessed fd fixups * @t: binder transaction for t->from * * If the transaction is being torn down prior to being * processed by the target process, free all of the * fd fixups and fput the file structs. It is safe to * call this function after the fixups have been * processed -- in that case, the list will be empty.
*/ staticvoid binder_free_txn_fixups(struct binder_transaction *t)
{ struct binder_txn_fd_fixup *fixup, *tmp;
if (target_proc) {
binder_inner_proc_lock(target_proc);
target_proc->outstanding_txns--; if (target_proc->outstanding_txns < 0)
pr_warn("%s: Unexpected outstanding_txns %d\n",
__func__, target_proc->outstanding_txns); if (!target_proc->outstanding_txns && target_proc->is_frozen)
wake_up_interruptible_all(&target_proc->freeze_wait); if (t->buffer)
t->buffer->transaction = NULL;
binder_inner_proc_unlock(target_proc);
} if (trace_binder_txn_latency_free_enabled())
binder_txn_latency_free(t); /* * If the transaction has no target_proc, then * t->buffer->transaction has already been cleared.
*/
binder_free_txn_fixups(t);
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
BUG_ON(t->flags & TF_ONE_WAY); while (1) {
target_thread = binder_get_txn_from_and_acq_inner(t); if (target_thread) {
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "send failed reply for transaction %d to %d:%d\n",
t->debug_id,
target_thread->proc->pid,
target_thread->pid);
binder_pop_transaction_ilocked(target_thread, t); if (target_thread->reply_error.cmd == BR_OK) {
target_thread->reply_error.cmd = error_code;
binder_enqueue_thread_work_ilocked(
target_thread,
&target_thread->reply_error.work);
wake_up_interruptible(&target_thread->wait);
} else { /* * Cannot get here for normal operation, but * we can if multiple synchronous transactions * are sent without blocking for responses. * Just ignore the 2nd error in this case.
*/
pr_warn("Unexpected reply error: %u\n",
target_thread->reply_error.cmd);
}
binder_inner_proc_unlock(target_thread->proc);
binder_thread_dec_tmpref(target_thread);
binder_free_transaction(t); return;
}
__release(&target_thread->proc->inner_lock);
next = t->from_parent;
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "send failed reply for transaction %d, target dead\n",
t->debug_id);
binder_free_transaction(t); if (next == NULL) {
binder_debug(BINDER_DEBUG_DEAD_BINDER, "reply failed, no target thread at root\n"); return;
}
t = next;
binder_debug(BINDER_DEBUG_DEAD_BINDER, "reply failed, no target thread -- retry %d\n",
t->debug_id);
}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	bool needs_reply = t->buffer->target_node && !(t->flags & TF_ONE_WAY);

	if (needs_reply) {
		/* synchronous call: the sender is waiting on a reply */
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/** * binder_get_object() - gets object and checks for valid metadata * @proc: binder_proc owning the buffer * @u: sender's user pointer to base of buffer * @buffer: binder_buffer that we're parsing. * @offset: offset in the @buffer at which to validate an object. * @object: struct binder_object to read into * * Copy the binder object at the given offset into @object. If @u is * provided then the copy is from the sender's buffer. If not, then * it is copied from the target's @buffer. * * Return: If there's a valid metadata object at @offset, the * size of that object. Otherwise, it returns zero. The object * is read into the struct binder_object pointed to by @object.
*/ static size_t binder_get_object(struct binder_proc *proc, constvoid __user *u, struct binder_buffer *buffer, unsignedlong offset, struct binder_object *object)
{
size_t read_size; struct binder_object_header *hdr;
size_t object_size = 0;
if (u) { if (copy_from_user(object, u + offset, read_size)) return 0;
} else { if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
offset, read_size)) return 0;
}
/* Ok, now see if we read a complete object. */
hdr = &object->hdr; switch (hdr->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE:
object_size = sizeof(struct flat_binder_object); break; case BINDER_TYPE_FD:
object_size = sizeof(struct binder_fd_object); break; case BINDER_TYPE_PTR:
object_size = sizeof(struct binder_buffer_object); break; case BINDER_TYPE_FDA:
object_size = sizeof(struct binder_fd_array_object); break; default: return 0;
} if (offset <= buffer->data_size - object_size &&
buffer->data_size >= object_size) return object_size; else return 0;
}
/** * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. * @proc: binder_proc owning the buffer * @b: binder_buffer containing the object * @object: struct binder_object to read into * @index: index in offset array at which the binder_buffer_object is * located * @start_offset: points to the start of the offset array * @object_offsetp: offset of @object read from @b * @num_valid: the number of valid offsets in the offset array * * Return: If @index is within the valid range of the offset array * described by @start and @num_valid, and if there's a valid * binder_buffer_object at the offset found in index @index * of the offset array, that object is returned. Otherwise, * %NULL is returned. * Note that the offset found in index @index itself is not * verified; this function assumes that @num_valid elements * from @start were previously verified to have valid offsets. * If @object_offsetp is non-NULL, then the offset within * @b is written to it.
*/ staticstruct binder_buffer_object *binder_validate_ptr( struct binder_proc *proc, struct binder_buffer *b, struct binder_object *object,
binder_size_t index,
binder_size_t start_offset,
binder_size_t *object_offsetp,
binder_size_t num_valid)
{
size_t object_size;
binder_size_t object_offset; unsignedlong buffer_offset;
/** * binder_validate_fixup() - validates pointer/fd fixups happen in order. * @proc: binder_proc owning the buffer * @b: transaction buffer * @objects_start_offset: offset to start of objects buffer * @buffer_obj_offset: offset to binder_buffer_object in which to fix up * @fixup_offset: start offset in @buffer to fix up * @last_obj_offset: offset to last binder_buffer_object that we fixed * @last_min_offset: minimum fixup offset in object at @last_obj_offset * * Return: %true if a fixup in buffer @buffer at offset @offset is * allowed. * * For safety reasons, we only allow fixups inside a buffer to happen * at increasing offsets; additionally, we only allow fixup on the last * buffer object that was verified, or one of its parents. * * Example of what is allowed: * * A * B (parent = A, offset = 0) * C (parent = A, offset = 16) * D (parent = C, offset = 0) * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) * * Examples of what is not allowed: * * Decreasing offsets within the same parent: * A * C (parent = A, offset = 16) * B (parent = A, offset = 0) // decreasing offset within A * * Referring to a parent that wasn't the last object or any of its parents: * A * B (parent = A, offset = 0) * C (parent = A, offset = 0) * C (parent = A, offset = 16) * D (parent = B, offset = 0) // B is not A or any of A's parents
*/ staticbool binder_validate_fixup(struct binder_proc *proc, struct binder_buffer *b,
binder_size_t objects_start_offset,
binder_size_t buffer_obj_offset,
binder_size_t fixup_offset,
binder_size_t last_obj_offset,
binder_size_t last_min_offset)
{ if (!last_obj_offset) { /* Nothing to fix up in */ returnfalse;
}
last_bbo = &last_object.bbo; /* * Safe to retrieve the parent of last_obj, since it * was already previously verified by the driver.
*/ if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) returnfalse;
last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
buffer_offset = objects_start_offset + sizeof(binder_size_t) * last_bbo->parent; if (binder_alloc_copy_from_buffer(&proc->alloc,
&last_obj_offset,
b, buffer_offset, sizeof(last_obj_offset))) returnfalse;
} return (fixup_offset >= last_min_offset);
}
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};
/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb;

	twcb = container_of(twork, struct binder_task_work_cb, twork);
	fput(twcb->file);
	kfree(twcb);
}
/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd: file-descriptor to close
 *
 * See comments in binder_do_fd_close(). Detaches @fd from the fd table
 * now, but defers the final fput() to task work so it happens only
 * after binder_ioctl() has returned. Allocation failure or an fd that
 * is not open simply results in no work being queued.
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);

	if (!twcb)
		return;

	init_task_work(&twcb->twork, binder_do_fd_close);
	twcb->file = file_close_fd(fd);
	if (!twcb->file) {
		/* fd was not open; nothing to close. */
		kfree(twcb);
		return;
	}

	/* Pin the file until binder_do_fd_close(); see comments there. */
	get_file(twcb->file);
	filp_close(twcb->file, current->files);
	task_work_add(current, &twcb->twork, TWA_RESUME);
}
if (ret) {
pr_err("transaction release %d bad handle %d, ret = %d\n",
debug_id, fp->handle, ret); break;
}
binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d\n",
rdata.debug_id, rdata.desc);
} break;
case BINDER_TYPE_FD: { /* * No need to close the file here since user-space * closes it for successfully delivered * transactions. For transactions that weren't * delivered, the new fd was never allocated so * there is no need to close and the fput on the * file is done when the transaction is torn * down.
*/
} break; case BINDER_TYPE_PTR: /* * Nothing to do here, this will get cleaned up when the * transaction buffer gets freed
*/ break; case BINDER_TYPE_FDA: { struct binder_fd_array_object *fda; struct binder_buffer_object *parent; struct binder_object ptr_object;
binder_size_t fda_offset;
size_t fd_index;
binder_size_t fd_buf_size;
binder_size_t num_valid;
if (is_failure) { /* * The fd fixups have not been applied so no * fds need to be closed.
*/ continue;
}
num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t);
fda = to_binder_fd_array_object(hdr);
parent = binder_validate_ptr(proc, buffer, &ptr_object,
fda->parent,
off_start_offset,
NULL,
num_valid); if (!parent) {
pr_err("transaction release %d bad parent offset\n",
debug_id); continue;
}
fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
pr_err("transaction release %d invalid number of fds (%lld)\n",
debug_id, (u64)fda->num_fds); continue;
} if (fd_buf_size > parent->length ||
fda->parent_offset > parent->length - fd_buf_size) { /* No space for all file descriptors here. */
pr_err("transaction release %d not enough space for %lld fds in buffer\n",
debug_id, (u64)fda->num_fds); continue;
} /* * the source data for binder_buffer_object is visible * to user-space and the @buffer element is the user * pointer to the buffer_object containing the fd_array. * Convert the address to an offset relative to * the base of the transaction buffer.
*/
fda_offset = parent->buffer - buffer->user_data +
fda->parent_offset; for (fd_index = 0; fd_index < fda->num_fds;
fd_index++) {
u32 fd; int err;
binder_size_t offset = fda_offset +
fd_index * sizeof(fd);
err = binder_alloc_copy_from_buffer(
&proc->alloc, &fd, buffer,
offset, sizeof(fd));
WARN_ON(err); if (!err) {
binder_deferred_fd_close(fd); /* * Need to make sure the thread goes * back to userspace to complete the * deferred close
*/ if (thread)
thread->looper_need_return = true;
}
}
} break; default:
pr_err("transaction release %d bad object type %x\n",
debug_id, hdr->type); break;
}
}
}
/* Clean up all the objects in the buffer */ staticinlinevoid binder_release_entire_buffer(struct binder_proc *proc, struct binder_thread *thread, struct binder_buffer *buffer, bool is_failure)
{
binder_size_t off_end_offset;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.