// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason for random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

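/* Example of the cycle this collector exists to break (user-space
 * sketch, illustrative only): create a socketpair(), pass each end's
 * fd through the other end with SCM_RIGHTS, then close() both fds.
 * Each in-flight struct file now holds the only reference keeping the
 * other alive, so neither refcount can reach zero without this GC.
 */
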
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

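/* Sockets suspected of being unreachable are parked on gc_candidates
 * for the duration of a collection; unix_gc_wait lets callers of
 * wait_for_unix_gc() sleep until the collection in progress finishes.
 */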
static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

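/* Walk the receive queue of 'x' and apply 'func' to every in-flight
 * candidate socket found inside queued SCM_RIGHTS messages.  If
 * 'hitlist' is non-NULL, skbs referencing at least one candidate are
 * unlinked from the queue and collected there for later purging.
 */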
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors carried by this skb */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the AF_UNIX socket behind this file, if any */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates; they could
					 * have been added to the queues after
					 * the garbage collection started.
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

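/* Apply 'func' (via scan_inflight()) to every socket reachable from 'x'
 * in one step.  For a listening socket this means scanning the queued
 * embryo sockets as well, since they may already carry SCM_RIGHTS skbs.
 */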
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket, collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

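/* Per-socket callbacks handed to scan_children() by unix_gc().  The
 * inflight counter of a unix socket tracks how many times it travels
 * inside SCM_RIGHTS messages; the GC temporarily adjusts these counts
 * to discover which sockets are only kept alive by one another.
 */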
static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over.
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

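/* gc_in_progress serialises collectors.  UNIX_INFLIGHT_TRIGGER_GC is a
 * rough safety valve: once this many fds are in flight system-wide,
 * SCM_RIGHTS senders force a synchronous collection rather than let
 * garbage pile up unboundedly.
 */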
static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
	/* If the number of inflight sockets is insane,
	 * force a garbage collect right now.
	 * Paired with the WRITE_ONCE() of unix_tot_inflight in
	 * unix_inflight()/unix_notinflight() and of gc_in_progress
	 * in unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();
	wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress));
}

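/* The collector proceeds in three phases, all under unix_gc_lock:
 *
 *   1) Select candidates: in-flight sockets whose file reference count
 *      is fully accounted for by SCM_RIGHTS messages in flight.
 *   2) Subtract the references candidates hold on one another, then
 *      restore them transitively for every candidate still referenced
 *      from outside; those sockets are not garbage.
 *   3) Whatever remains is unreachable cyclic garbage: collect its
 *      queued skbs on a hitlist and purge them with the lock dropped.
 */
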
/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct sk_buff *next_skb, *skb;
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, true);

	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to the queues, so we must make sure only to touch
	 * candidates.
	 *
	 * Embryos, though never candidates themselves, affect which
	 * candidates are reachable by the garbage collector.  Before
	 * being added to a listener's queue, an embryo may already
	 * receive data carrying SCM_RIGHTS, potentially making the
	 * passed socket a candidate that is not yet reachable by the
	 * collector.  It becomes reachable once the embryo is
	 * enqueued.  Therefore, we must ensure that no SCM-laden
	 * embryo appears in a (candidate) listener's queue between
	 * consecutive scan_children() calls.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		struct sock *sk = &u->sk;
		long total_refs;
		long inflight_refs;

		total_refs = file_count(sk->sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);

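			/* The bare lock/unlock of the listener's state
			 * lock waits out any concurrent connect() that
			 * is still queueing an embryo, so no SCM-laden
			 * embryo can slip in between the scan_children()
			 * passes below (see the comment above).
			 */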
			if (sk->sk_state == TCP_LISTEN) {
				unix_state_lock(sk);
				unix_state_unlock(sk);
			}
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates which
	 * still have references remaining.  Do this recursively, so
	 * that in the end only the sockets forming cyclic references
	 * remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage.  Restore the
	 * original inflight counters for these as well, and remove
	 * the skbuffs which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore them to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);
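
	/* Purge the hitlist outside unix_gc_lock: freeing these skbs
	 * drops their in-flight fds via unix_notinflight(), which
	 * takes unix_gc_lock itself.
	 */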

	/* We need io_uring to clean up its registered files itself, so
	 * ignore all io_uring-originated skbs.  That is fine: io_uring
	 * doesn't keep references to other io_uring instances, so
	 * killing all other files in the cycle will put all io_uring
	 * references, forcing it through the normal release path and
	 * eventually putting its registered files.
	 */
	skb_queue_walk_safe(&hitlist, skb, next_skb) {
		if (skb->scm_io_uring) {
			__skb_unlink(skb, &hitlist);
			skb_queue_tail(&skb->sk->sk_receive_queue, skb);
		}
	}

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* There could be io_uring-registered files; push them back to
	 * the inflight list.
	 */
	list_for_each_entry_safe(u, next, &gc_candidates, link)
		list_move_tail(&u->link, &gc_inflight_list);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}