cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

garbage.c (9370B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason for random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
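
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): the cycles this collector exists for are built from user
 * space with SCM_RIGHTS.  Each end of a socketpair is sent in-flight
 * over the pair and both descriptors are closed, so the only remaining
 * references to either struct file are the ones queued in the unread
 * SCM_RIGHTS messages; no fput() will ever free them, only a cycle
 * collector can.  Assuming a hypothetical send_fd() helper:
 *
 *	int s[2];
 *
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, s);
 *	send_fd(s[0], s[1]);	// a reference to s[1] is now in-flight
 *	send_fd(s[1], s[0]);	// a reference to s[0] is now in-flight
 *	close(s[0]);
 *	close(s[1]);
 *
 * where send_fd() wraps sendmsg() with a SCM_RIGHTS control message
 * (user-space code, needs <sys/socket.h> and <string.h>):
 *
 *	static void send_fd(int via, int fd)
 *	{
 *		char dummy = '*';
 *		struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *		char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = cbuf,
 *			.msg_controllen = sizeof(cbuf),
 *		};
 *		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *		cmsg->cmsg_level = SOL_SOCKET;
 *		cmsg->cmsg_type = SCM_RIGHTS;
 *		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
 *		sendmsg(via, &msg, 0);
 *	}
 */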

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over.
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
	/* If the number of inflight sockets is insane,
	 * force a garbage collect right now.
	 * Paired with the WRITE_ONCE() in unix_inflight() and
	 * unix_notinflight(), and with the WRITE_ONCE() of
	 * gc_in_progress in unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}
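
/* Note (editor's addition): in this tree wait_for_unix_gc() is called
 * from the AF_UNIX sendmsg paths, so a task flooding the system with
 * in-flight fds first triggers, then waits out, a collection instead
 * of queueing more.  The threshold of 16000 bounds how many in-flight
 * sockets can accumulate between collections.
 */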

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, true);

	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 */
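	/* Illustrative example (editor's addition): a socket that was
	 * sent over another socket with SCM_RIGHTS and then close()d by
	 * its sender has file_count() == 1 == inflight, so it becomes a
	 * candidate below; one that some process still holds open has
	 * file_count() == inflight + 1 and is skipped.
	 */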
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates which
	 * have remaining references.  Do this recursively, so that only
	 * those which form cyclic references remain.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);
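
	/* At this point (editor's addition) every socket still on
	 * gc_candidates has an inflight count of zero even after the
	 * references reachable from non-garbage sockets were restored:
	 * all of its references come from other candidates, i.e. it is
	 * part of a cycle.
	 */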

	/* Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

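	/* The purge must happen outside unix_gc_lock: freeing these
	 * skbs drops their queued fd references, which ends up in
	 * unix_notinflight(), and that takes unix_gc_lock itself.
	 */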
	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}