// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket
 * problem. If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
*/
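
/* Illustrative userspace sketch (not part of this file): one way to
 * build the unreachable cycle this GC exists to reclaim.  Each end of
 * a socketpair is sent over itself via SCM_RIGHTS, so after close()
 * the two struct files keep each other alive through in-flight skbs
 * and only unix_gc() can free them.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

static void send_fd(int via, int fd)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u = { 0 };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	sendmsg(via, &msg, 0);
}

int main(void)
{
	int sv[2];

	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);

	send_fd(sv[0], sv[0]);	/* sv[0]'s file now pinned by sv[1]'s queue */
	send_fd(sv[1], sv[1]);	/* sv[1]'s file now pinned by sv[0]'s queue */

	/* No fd references remain, yet both files are still "inflight". */
	close(sv[0]);
	close(sv[1]);

	return 0;
}
#endif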
static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
{
	/* If an embryo socket has a fd,
	 * the listener indirectly holds the fd's refcnt.
	 */
	if (edge->successor->listener)
		return unix_sk(edge->successor->listener)->vertex;

	return edge->successor->vertex;
}
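
/* Illustrative note: an "embryo" is a socket spawned by connect() that
 * accept() has not returned yet.  It has no fd of its own, so a fd
 * sitting in its receive queue is kept alive only via the listener;
 * following the edge to the listener's vertex keeps such fds visible
 * to the GC (see the 12 Oct 1998 entry in the header above).
 */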
static void unix_update_graph(struct unix_vertex *vertex)
{
	/* If the receiver socket is not inflight, no cyclic
	 * reference could be formed.
	 */
	if (!vertex)
		return;

	unix_graph_maybe_cyclic = true;
	unix_graph_grouped = false;
}
void unix_update_edges(struct unix_sock *receiver)
{
	/* nr_unix_fds is only updated under unix_state_lock().
	 * If it's 0 here, the embryo socket is not part of the
	 * inflight graph, and GC will not see it, so no lock needed.
	 */
	if (!receiver->scm_stat.nr_unix_fds) {
receiver->listener = NULL;
} else {
spin_lock(&unix_gc_lock);
unix_update_graph(unix_sk(receiver->listener)->vertex);
receiver->listener = NULL;
spin_unlock(&unix_gc_lock);
}
}
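
/* Illustrative note (call site is an assumption based on af_unix.c):
 * unix_update_edges() runs in the accept() path, once the embryo socket
 * leaves the listener's queue.  From then on the socket is reachable
 * through its own fd, so edges must stop resolving to the listener's
 * vertex; clearing ->listener is what flips unix_edge_successor() over
 * to the embryo's own vertex.
 */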
int unix_prepare_fpl(struct scm_fp_list *fpl)
{
	struct unix_vertex *vertex;
	int i;

	if (!fpl->count_unix)
		return 0;

for (i = 0; i < fpl->count_unix; i++) {
		vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
		if (!vertex)
			goto err;

list_add(&vertex->entry, &fpl->vertices);
}
fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
				    GFP_KERNEL_ACCOUNT);
	if (!fpl->edges)
		goto err;

return 0;
err:
	unix_free_vertices(fpl);
	return -ENOMEM;
}
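
/* Illustrative note: everything the graph update needs is preallocated
 * here, so linking the edges later under unix_gc_lock cannot fail.  A
 * hedged sketch of the sendmsg-side ordering (caller names are an
 * assumption based on af_unix.c):
 *
 *	unix_attach_fds()
 *	  -> unix_prepare_fpl()		: alloc vertices + edge array
 *	queueing the skb to the receiver
 *	  -> unix_add_edges()		: link edges under unix_gc_lock
 */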
void unix_destroy_fpl(struct scm_fp_list *fpl)
{
	if (fpl->inflight)
		unix_del_edges(fpl);

	kvfree(fpl->edges);
	unix_free_vertices(fpl);
}
static bool unix_scc_cyclic(struct list_head *scc)
{
	struct unix_vertex *vertex;
	struct unix_edge *edge;

	/* SCC containing multiple vertices ? */
	if (!list_is_singular(scc))
		return true;

	vertex = list_first_entry(scc, typeof(*vertex), scc_entry);

	/* Self-reference or an embryo-listener circle ? */
	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
		if (unix_edge_successor(edge) == vertex)
			return true;
	}

	return false;
}
static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
			    struct sk_buff_head *hitlist)
{
	LIST_HEAD(vertex_stack);
	struct unix_edge *edge;
	LIST_HEAD(edge_stack);

next_vertex:
	/* Push vertex to vertex_stack and mark it as on-stack
	 * (index >= UNIX_VERTEX_INDEX_START).
	 * The vertex will be popped when finalising SCC later.
	 */
	list_add(&vertex->scc_entry, &vertex_stack);

	vertex->index = *last_index;
	vertex->scc_index = *last_index;
	(*last_index)++;

	/* Explore neighbour vertices (receivers of the current vertex's fd). */
	list_for_each_entry(edge, &vertex->edges, vertex_entry) {
		struct unix_vertex *next_vertex = unix_edge_successor(edge);

		if (!next_vertex)
			continue;

		if (next_vertex->index == unix_vertex_unvisited_index) {
			/* Iterative deepening depth first search
			 *
			 *   1. Push a forward edge to edge_stack and set
			 *      the successor to vertex for the next iteration.
			 */
list_add(&edge->stack_entry, &edge_stack);
			vertex = next_vertex;
			goto next_vertex;

			/*   2. Pop the edge directed to the current vertex
			 *      and restore the ancestor for backtracking.
			 */
prev_vertex:
edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
			list_del_init(&edge->stack_entry);

			next_vertex = vertex;
			vertex = edge->predecessor->vertex;

			/* If the successor has a smaller scc_index, two vertices
			 * are in the same SCC, so propagate the smaller scc_index
			 * to skip SCC finalisation.
			 */
vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
		} else if (next_vertex->index != unix_vertex_grouped_index) {
			/* Loop detected by a back/cross edge.
			 *
			 * The successor is on vertex_stack, so two vertices are in
			 * the same SCC.  If the successor has a smaller scc_index,
			 * propagate it to skip SCC finalisation.
			 */
vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
		} else {
			/* The successor was already grouped as another SCC */
}
}
	if (vertex->index == vertex->scc_index) {
		struct unix_vertex *v;
		struct list_head scc;
		bool scc_dead = true;

		/* SCC finalised.
		 *
		 * If the scc_index was not updated, all the vertices above on
		 * vertex_stack are in the same SCC.  Group them using scc_entry.
		 */
__list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);
		list_for_each_entry_reverse(v, &scc, scc_entry) {
			/* Don't restart DFS from this vertex in unix_walk_scc(). */
list_move_tail(&v->entry, &unix_visited_vertices);
/* Mark vertex as off-stack. */
v->index = unix_vertex_grouped_index;
if (scc_dead)
scc_dead = unix_vertex_dead(v);
}
if (scc_dead) {
unix_collect_skb(&scc, hitlist);
		} else {
			if (unix_vertex_max_scc_index < vertex->scc_index)
				unix_vertex_max_scc_index = vertex->scc_index;

if (!unix_graph_maybe_cyclic)
unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);
}
list_del(&scc);
}
	/* Need backtracking ? */
	if (!list_empty(&edge_stack))
		goto prev_vertex;
}
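
/* Illustrative userspace sketch (not part of this file): the classic
 * recursive form of Tarjan's SCC algorithm that __unix_walk_scc()
 * implements iteratively above (edge_stack stands in for the call
 * stack, since kernel stacks are small).  The adjacency matrix is
 * hypothetical demo data.
 */
#if 0
#include <stdio.h>

#define NV 4

static const int adj[NV][NV] = {	/* adj[v][w] != 0 : edge v -> w */
	{ 0, 1, 0, 0 },
	{ 0, 0, 1, 0 },
	{ 1, 0, 0, 1 },
	{ 0, 0, 0, 0 },
};

static int index_of[NV], lowlink[NV], on_stack[NV];
static int stack[NV], sp, next_index = 1;

static void strongconnect(int v)
{
	int w;

	index_of[v] = lowlink[v] = next_index++;	/* like vertex->index/scc_index */
	stack[sp++] = v;				/* like vertex_stack */
	on_stack[v] = 1;

	for (w = 0; w < NV; w++) {
		if (!adj[v][w])
			continue;

		if (!index_of[w]) {
			strongconnect(w);		/* forward edge: recurse */
			if (lowlink[w] < lowlink[v])
				lowlink[v] = lowlink[w];
		} else if (on_stack[w] && index_of[w] < lowlink[v]) {
			lowlink[v] = index_of[w];	/* back/cross edge */
		}
	}

	/* Root of an SCC: pop every vertex above (and including) v. */
	if (lowlink[v] == index_of[v]) {
		printf("SCC:");
		do {
			w = stack[--sp];
			on_stack[w] = 0;
			printf(" %d", w);
		} while (w != v);
		printf("\n");
	}
}

int main(void)
{
	int v;

	for (v = 0; v < NV; v++)
		if (!index_of[v])
			strongconnect(v);

	return 0;
}
#endif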
void wait_for_unix_gc(struct scm_fp_list *fpl)
{
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 *
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight(), and __unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
!READ_ONCE(gc_in_progress))
unix_gc();
	/* Penalise users who want to send AF_UNIX sockets
	 * but whose sockets have not been received yet.
	 */
	if (!fpl || !fpl->count_unix ||
	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
		return;

if (READ_ONCE(gc_in_progress))
flush_work(&unix_gc_work);
}
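
/* Worked numbers for the thresholds above (an assumption from the
 * #defines in this file and SCM_MAX_FD in net/scm.h):
 *
 *	UNIX_INFLIGHT_TRIGGER_GC = 16000	system-wide inflight fds
 *	UNIX_INFLIGHT_SANE_USER  = SCM_MAX_FD * 8 = 253 * 8 = 2024
 *
 * So crossing 16000 inflight fds system-wide starts a GC immediately,
 * while a single user holding 2024 or more not-yet-received fds is
 * made to wait for the GC worker before queueing more.
 */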