// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Current optimizations:
 *
 *	- explicit stack instead of recursion
 *	- tail recurse on first born instead of immediate push/pop
 *	- we gather the stuff that should not be killed into tree
 *	  and stack is just a path from root to the current pointer.
 *
 * Future optimizations:
 *
 *	- don't just push entire root set; process in place
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *
 *	Graph may have cycles. That is, we can send the descriptor
 *	of foo to bar and vice versa. Current code chokes on that.
 *	Fix: move SCM_RIGHTS ones into the separate list and then
 *	skb_free() them all instead of doing explicit fput's.
 *	Another problem: since fput() may block, somebody may
 *	create a new unix_socket while we are in the middle of the sweep
 *	phase. Fix: revert the logic wrt MARKED. Mark everything
 *	upon the beginning and unmark non-junk ones.
 *
 *	[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *	sent to connect()'ed but still not accept()'ed sockets.
 *	Fixed. Old code had a slightly different problem here:
 *	an extra fput() when we passed the descriptor via
 *	such a socket and then closed it (the descriptor). That would
 *	happen on each unix_gc() until the accept(). Since the struct
 *	file in question would go to the free list and might be reused...
 *	That might be the reason for random oopses on filp_close()
 *	in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of the stack. Now we keep the
 *		tree with root in dummy + pointer (gc_current) to one of the
 *		nodes. Stack is represented as a path from gc_current to
 *		dummy. Unmark now means "add to tree". Push == "make it a
 *		son of gc_current". Pop == "move gc_current to parent". We
 *		keep only pointers to parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi	25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being
 *		racy wrt receive and holding up unrelated socket operations.
 */
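/* For illustration only: a hedged userspace sketch (not part of this file)
 * of the cycle described above.  send_fd() is a hypothetical helper that
 * passes an fd as an SCM_RIGHTS control message via sendmsg().
 *
 *	int a[2], b[2];
 *
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, a);
 *	socketpair(AF_UNIX, SOCK_STREAM, 0, b);
 *	send_fd(a[0], b[1]);	(b[1] now sits in-flight on a[1]'s queue)
 *	send_fd(b[0], a[1]);	(a[1] now sits in-flight on b[1]'s queue)
 *	close(a[0]); close(a[1]);
 *	close(b[0]); close(b[1]);
 *
 * After the close()es the two in-flight sockets hold the only references
 * to each other, so only a garbage collector can reclaim them.
 */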
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>
/* Internal data structures and random procedures: */

static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
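
/* Walk the skbs queued on x's receive queue, apply func to every unix
 * socket found among their in-flight SCM_RIGHTS descriptors (candidates
 * only), and, if hitlist is given, unlink the skbs that reference at
 * least one candidate and collect them on hitlist.
 */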
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}
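
/* Like scan_inflight(), but for a listening socket scan the receive
 * queues of its not-yet-accept()ed embryo sockets instead, since
 * descriptors can already be queued on those.
 */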
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}
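
/* Callbacks for scan_children(): the first GC pass uses dec_inflight() to
 * subtract the candidate-internal references, and the later passes use the
 * inc_* variants to donate them back to sockets proven reachable.
 */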
static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}
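
/* Throttling: AF_UNIX senders are expected to call wait_for_unix_gc()
 * before queueing new in-flight descriptors, so that they cannot outrun
 * the garbage collector; the GC is also kicked synchronously when the
 * number of in-flight sockets becomes insane.
 */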
static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 */
	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}
/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}
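	/* A concrete instance of the invariant above: an fd that was passed
	 * over a socketpair and then close()d locally keeps only its
	 * in-flight references, so file_count() == inflight and the socket
	 * becomes a candidate.
	 */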
	/* Now remove all internal in-flight reference to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);
	/* Restore the references for children of all candidates,
	 * which have remaining references.  Do this recursively, so
	 * only those remain, which form cyclic references.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);
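	/* The loop above runs to a fixpoint: each socket proven reachable
	 * re-donates references to its children, possibly pushing them above
	 * zero; the move-to-tail in inc_inflight_move_tail() guarantees such
	 * children get revisited before the traversal ends.
	 */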
	/* Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);
	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}
	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);
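	/* The purge must run outside unix_gc_lock: freeing each skb invokes
	 * the socket destructor, which ends up in unix_notinflight() and
	 * takes the lock itself.  Dropping these final file references is
	 * what actually destroys the cyclic garbage.
	 */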
	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}