while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
if (user_ptr < buffer->user_data) {
n = n->rb_left;
} elseif (user_ptr > buffer->user_data) {
n = n->rb_right;
} else { /* * Guard against user threads attempting to * free the buffer when in use by kernel or * after it's already been freed.
*/ if (!buffer->allow_user_free) return ERR_PTR(-EPERM);
buffer->allow_user_free = 0; return buffer;
}
} return NULL;
}
/** * binder_alloc_prepare_to_free() - get buffer given user ptr * @alloc: binder_alloc for this proc * @user_ptr: User pointer to buffer data * * Validate userspace pointer to buffer data and return buffer corresponding to * that user pointer. Search the rb tree for buffer that matches user data * pointer. * * Return: Pointer to buffer or NULL
*/ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, unsignedlong user_ptr)
{
guard(mutex)(&alloc->mutex); return binder_alloc_prepare_to_free_locked(alloc, user_ptr);
}
staticinlinevoid
binder_set_installed_page(struct binder_alloc *alloc, unsignedlong index, struct page *page)
{ /* Pairs with acquire in binder_get_installed_page() */
smp_store_release(&alloc->pages[index], page);
}
staticinlinestruct page *
binder_get_installed_page(struct binder_alloc *alloc, unsignedlong index)
{ /* Pairs with release in binder_set_installed_page() */ return smp_load_acquire(&alloc->pages[index]);
}
/* * Find an existing page in the remote mm. If missing, * don't attempt to fault-in just propagate an error.
*/
mmap_read_lock(mm); if (binder_alloc_is_mapped(alloc))
npages = get_user_pages_remote(mm, addr, 1, FOLL_NOFAULT,
&page, NULL);
mmap_read_unlock(mm);
/* attempt per-vma lock first */
vma = lock_vma_under_rcu(mm, addr); if (vma) { if (binder_alloc_is_mapped(alloc))
ret = vm_insert_page(vma, addr, page);
vma_end_read(vma); return ret;
}
/* fall back to mmap_lock */
mmap_read_lock(mm);
vma = vma_lookup(mm, addr); if (vma && binder_alloc_is_mapped(alloc))
ret = vm_insert_page(vma, addr, page);
mmap_read_unlock(mm);
page = binder_page_alloc(alloc, index); if (!page) {
ret = -ENOMEM; goto out;
}
ret = binder_page_insert(alloc, addr, page); switch (ret) { case -EBUSY: /* * EBUSY is ok. Someone installed the pte first but the * alloc->pages[index] has not been updated yet. Discard * our page and look up the one already installed.
*/
ret = 0;
binder_free_page(page);
page = binder_page_lookup(alloc, addr); if (!page) {
pr_err("%d: failed to find page at offset %lx\n",
alloc->pid, addr - alloc->vm_start);
ret = -ESRCH; break;
}
fallthrough; case 0: /* Mark page installation complete and safe to use */
binder_set_installed_page(alloc, index, page); break; default:
binder_free_page(page);
pr_err("%d: %s failed to insert page at offset %lx with %d\n",
alloc->pid, __func__, addr - alloc->vm_start, ret); break;
}
out:
mmput_async(alloc->mm); return ret;
}
start = buffer->user_data & PAGE_MASK;
final = PAGE_ALIGN(buffer->user_data + size);
for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) { unsignedlong index; int ret;
index = (page_addr - alloc->vm_start) / PAGE_SIZE; if (binder_get_installed_page(alloc, index)) continue;
trace_binder_alloc_page_start(alloc, index);
ret = binder_install_single_page(alloc, index, page_addr); if (ret) return ret;
trace_binder_alloc_page_end(alloc, index);
}
return 0;
}
/* The range of pages should exclude those shared with other buffers */ staticvoid binder_lru_freelist_del(struct binder_alloc *alloc, unsignedlong start, unsignedlong end)
{ unsignedlong page_addr; struct page *page;
staticbool debug_low_async_space_locked(struct binder_alloc *alloc)
{ /* * Find the amount and size of buffers allocated by the current caller; * The idea is that once we cross the threshold, whoever is responsible * for the low async space is likely to try to send another async txn, * and at some point we'll catch them in the act. This is more efficient * than keeping a map per pid.
*/ struct binder_buffer *buffer;
size_t total_alloc_size = 0; int pid = current->tgid;
size_t num_buffers = 0; struct rb_node *n;
/* * Only start detecting spammers once we have less than 20% of async * space left (which is less than 10% of total buffer size).
*/ if (alloc->free_async_space >= alloc->buffer_size / 10) {
alloc->oneway_spam_detected = false; returnfalse;
}
for (n = rb_first(&alloc->allocated_buffers); n != NULL;
n = rb_next(n)) {
buffer = rb_entry(n, struct binder_buffer, rb_node); if (buffer->pid != pid) continue; if (!buffer->async_transaction) continue;
total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
num_buffers++;
}
/* * Warn if this pid has more than 50 transactions, or more than 50% of * async space (which is 25% of total buffer size). Oneway spam is only * detected when the threshold is exceeded.
*/ if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
alloc->pid, pid, num_buffers, total_alloc_size); if (!alloc->oneway_spam_detected) {
alloc->oneway_spam_detected = true; returntrue;
}
} returnfalse;
}
/* Callers preallocate @new_buffer, it is freed by this function if unused */ staticstruct binder_buffer *binder_alloc_new_buf_locked( struct binder_alloc *alloc, struct binder_buffer *new_buffer,
size_t size, int is_async)
{ struct rb_node *n = alloc->free_buffers.rb_node; struct rb_node *best_fit = NULL; struct binder_buffer *buffer; unsignedlong next_used_page; unsignedlong curr_last_page;
size_t buffer_size;
if (is_async && alloc->free_async_space < size) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd failed, no async space left\n",
alloc->pid, size);
buffer = ERR_PTR(-ENOSPC); goto out;
}
if (buffer_size != size) { /* Found an oversized buffer and needs to be split */
buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
buffer_size = binder_alloc_buffer_size(alloc, buffer);
/* * Now we remove the pages from the freelist. A clever calculation * with buffer_size determines if the last page is shared with an * adjacent in-use buffer. In such case, the page has been already * removed from the freelist so we trim our range short.
*/
next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
curr_last_page = PAGE_ALIGN(buffer->user_data + size);
binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
min(next_used_page, curr_last_page));
/* Calculate the sanitized total size, returns 0 for invalid request */ staticinline size_t sanitized_size(size_t data_size,
size_t offsets_size,
size_t extra_buffers_size)
{
size_t total, tmp;
/* Align to pointer size and check for overflows */
tmp = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *)); if (tmp < data_size || tmp < offsets_size) return 0;
total = tmp + ALIGN(extra_buffers_size, sizeof(void *)); if (total < tmp || total < extra_buffers_size) return 0;
/* Pad 0-sized buffers so they get a unique address */
total = max(total, sizeof(void *));
return total;
}
/** * binder_alloc_new_buf() - Allocate a new binder buffer * @alloc: binder_alloc for this proc * @data_size: size of user data buffer * @offsets_size: user specified buffer offset * @extra_buffers_size: size of extra space for meta-data (eg, security context) * @is_async: buffer for async transaction * * Allocate a new buffer given the requested sizes. Returns * the kernel version of the buffer pointer. The size allocated * is the sum of the three given sizes (each rounded up to * pointer-sized boundary) * * Return: The allocated buffer or %ERR_PTR(-errno) if error
*/ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size, int is_async)
{ struct binder_buffer *buffer, *next;
size_t size; int ret;
/* Check binder_alloc is fully initialized */ if (!binder_alloc_is_mapped(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%d: binder_alloc_buf, no vma\n",
alloc->pid); return ERR_PTR(-ESRCH);
}
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
next = binder_buffer_next(buffer); if (buffer_start_page(next) == buffer_start_page(buffer)) goto skip_freelist;
}
/** * binder_alloc_get_page() - get kernel pointer for given buffer offset * @alloc: binder_alloc for this proc * @buffer: binder buffer to be accessed * @buffer_offset: offset into @buffer data * @pgoffp: address to copy final page offset to * * Lookup the struct page corresponding to the address * at @buffer_offset into @buffer->user_data. If @pgoffp is not * NULL, the byte-offset into the page is written there. * * The caller is responsible to ensure that the offset points * to a valid address within the @buffer and that @buffer is * not freeable by the user. Since it can't be freed, we are * guaranteed that the corresponding elements of @alloc->pages[] * cannot change. * * Return: struct page
*/ staticstruct page *binder_alloc_get_page(struct binder_alloc *alloc, struct binder_buffer *buffer,
binder_size_t buffer_offset,
pgoff_t *pgoffp)
{
binder_size_t buffer_space_offset = buffer_offset +
(buffer->user_data - alloc->vm_start);
pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
size_t index = buffer_space_offset >> PAGE_SHIFT;
*pgoffp = pgoff;
return alloc->pages[index];
}
/** * binder_alloc_clear_buf() - zero out buffer * @alloc: binder_alloc for this proc * @buffer: binder buffer to be cleared * * memset the given buffer to 0
*/ staticvoid binder_alloc_clear_buf(struct binder_alloc *alloc, struct binder_buffer *buffer)
{
size_t bytes = binder_alloc_buffer_size(alloc, buffer);
binder_size_t buffer_offset = 0;
while (bytes) { unsignedlong size; struct page *page;
pgoff_t pgoff;
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}

	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_free_buf);
/** * binder_alloc_mmap_handler() - map virtual address space for proc * @alloc: alloc structure for this proc * @vma: vma passed to mmap() * * Called by binder_mmap() to initialize the space specified in * vma for allocating binder buffers * * Return: * 0 = success * -EBUSY = address space already mapped * -ENOMEM = failed to map memory to given address space
*/ int binder_alloc_mmap_handler(struct binder_alloc *alloc, struct vm_area_struct *vma)
{ struct binder_buffer *buffer; constchar *failure_string; int ret;
if (unlikely(vma->vm_mm != alloc->mm)) {
ret = -EINVAL;
failure_string = "invalid vma->vm_mm"; goto err_invalid_mm;
}
/** * binder_alloc_print_allocated() - print buffer info * @m: seq_file for output via seq_printf() * @alloc: binder_alloc for this proc * * Prints information about every buffer associated with * the binder_alloc state to the given seq_file
*/ void binder_alloc_print_allocated(struct seq_file *m, struct binder_alloc *alloc)
{ struct binder_buffer *buffer; struct rb_node *n;
/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_is_mapped(alloc)) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = binder_get_installed_page(alloc, i);
			if (!page)
				free++;
			else if (list_empty(page_to_lru(page)))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	/* Walk the allocated-buffers rb-tree under the alloc mutex */
	guard(mutex)(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n))
		count++;
	return count;
}
/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->mapped to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_mapped(alloc, false);
}
EXPORT_SYMBOL_IF_KUNIT(binder_alloc_vma_close);
/** * binder_alloc_free_page() - shrinker callback to free pages * @item: item to free * @lru: list_lru instance of the item * @cb_arg: callback argument * * Called from list_lru_walk() in binder_shrink_scan() to free * up pages when the system is under memory pressure.
*/ enum lru_status binder_alloc_free_page(struct list_head *item, struct list_lru_one *lru, void *cb_arg)
__must_hold(&lru->lock)
{ struct binder_shrinker_mdata *mdata = container_of(item, typeof(*mdata), lru); struct binder_alloc *alloc = mdata->alloc; struct mm_struct *mm = alloc->mm; struct vm_area_struct *vma; struct page *page_to_free; unsignedlong page_addr; int mm_locked = 0;
size_t index;
if (!mmget_not_zero(mm)) goto err_mmget;
index = mdata->page_index;
page_addr = alloc->vm_start + index * PAGE_SIZE;
/* attempt per-vma lock first */
vma = lock_vma_under_rcu(mm, page_addr); if (!vma) { /* fall back to mmap_lock */ if (!mmap_read_trylock(mm)) goto err_mmap_read_lock_failed;
mm_locked = 1;
vma = vma_lookup(mm, page_addr);
}
if (!mutex_trylock(&alloc->mutex)) goto err_get_alloc_mutex_failed;
/* * Since a binder_alloc can only be mapped once, we ensure * the vma corresponds to this mapping by checking whether * the binder_alloc is still mapped.
*/ if (vma && !binder_alloc_is_mapped(alloc)) goto err_invalid_vma;
/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	/* Shared global freelist is used for all procs */
	__binder_alloc_init(alloc, &binder_freelist);
}
int binder_alloc_shrinker_init(void)
{ int ret;
ret = list_lru_init(&binder_freelist); if (ret) return ret;
/** * check_buffer() - verify that buffer/offset is safe to access * @alloc: binder_alloc for this proc * @buffer: binder buffer to be accessed * @offset: offset into @buffer data * @bytes: bytes to access from offset * * Check that the @offset/@bytes are within the size of the given * @buffer and that the buffer is currently active and not freeable. * Offsets must also be multiples of sizeof(u32). The kernel is * allowed to touch the buffer in two cases: * * 1) when the buffer is being created: * (buffer->free == 0 && buffer->allow_user_free == 0) * 2) when the buffer is being torn down: * (buffer->free == 0 && buffer->transaction == NULL). * * Return: true if the buffer is safe to access
*/ staticinlinebool check_buffer(struct binder_alloc *alloc, struct binder_buffer *buffer,
binder_size_t offset, size_t bytes)
{
size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
/*
 * NOTE(review): the following text is an extraction artifact — a German
 * website disclaimer that is not part of this source file. Translated:
 * "The information on this web page was compiled carefully and to the best
 * of our knowledge. However, neither completeness, correctness, nor quality
 * of the provided information is guaranteed. Note: the colored syntax
 * highlighting and the measurement are still experimental."
 */