cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

fs_pin.c (1905B)


// SPDX-License-Identifier: GPL-2.0
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "internal.h"
#include "mount.h"

static DEFINE_SPINLOCK(pin_lock);

/*
 * Detach a pin from its superblock and mount lists, mark it done and
 * wake up anyone sleeping in pin_kill() waiting for it.
 */
void pin_remove(struct fs_pin *pin)
{
	spin_lock(&pin_lock);
	hlist_del_init(&pin->m_list);
	hlist_del_init(&pin->s_list);
	spin_unlock(&pin_lock);
	spin_lock_irq(&pin->wait.lock);
	pin->done = 1;
	wake_up_locked(&pin->wait);
	spin_unlock_irq(&pin->wait.lock);
}

/*
 * Hang a pin on both the superblock's s_pins list and the mount's
 * mnt_pins list.
 */
void pin_insert(struct fs_pin *pin, struct vfsmount *m)
{
	spin_lock(&pin_lock);
	hlist_add_head(&pin->s_list, &m->mnt_sb->s_pins);
	hlist_add_head(&pin->m_list, &real_mount(m)->mnt_pins);
	spin_unlock(&pin_lock);
}

/*
 * Kill a pin.  Called with rcu_read_lock() held; drops it.  If nobody
 * has started tearing the pin down yet, invoke its ->kill() callback;
 * if teardown is already in progress, wait for it to finish.
 */
void pin_kill(struct fs_pin *p)
{
	wait_queue_entry_t wait;

	if (!p) {
		rcu_read_unlock();
		return;
	}
	init_wait(&wait);
	spin_lock_irq(&p->wait.lock);
	if (likely(!p->done)) {
		p->done = -1;
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		p->kill(p);
		return;
	}
	if (p->done > 0) {
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		return;
	}
	__add_wait_queue(&p->wait, &wait);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&p->wait.lock);
		rcu_read_unlock();
		schedule();
		rcu_read_lock();
		if (likely(list_empty(&wait.entry)))
			break;
		/* OK, we know p couldn't have been freed yet */
		spin_lock_irq(&p->wait.lock);
		if (p->done > 0) {
			spin_unlock_irq(&p->wait.lock);
			break;
		}
	}
	rcu_read_unlock();
}

/*
 * Kill every pin hanging off a mount's mnt_pins list.
 */
void mnt_pin_kill(struct mount *m)
{
	while (1) {
		struct hlist_node *p;
		rcu_read_lock();
		p = READ_ONCE(m->mnt_pins.first);
		if (!p) {
			rcu_read_unlock();
			break;
		}
		pin_kill(hlist_entry(p, struct fs_pin, m_list));
	}
}

/*
 * Kill every pin on a given list (e.g. a superblock's s_pins).
 */
void group_pin_kill(struct hlist_head *p)
{
	while (1) {
		struct hlist_node *q;
		rcu_read_lock();
		q = READ_ONCE(p->first);
		if (!q) {
			rcu_read_unlock();
			break;
		}
		pin_kill(hlist_entry(q, struct fs_pin, s_list));
	}
}
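
The struct these helpers operate on is not defined in this file; it lives in include/linux/fs_pin.h. The sketch below reconstructs the rough shape of that header and adds a minimal, hypothetical consumer (my_obj, my_obj_kill and my_obj_attach are invented names; the real in-tree user is the BSD process accounting code in kernel/acct.c): embed the pin in your own object, initialize it with a ->kill() callback via init_fs_pin(), attach it to a mount with pin_insert(), and have the callback call pin_remove() during teardown.

/* Includes assumed for this sketch. */
#include <linux/fs.h>
#include <linux/fs_pin.h>
#include <linux/mount.h>
#include <linux/slab.h>

/*
 * Rough shape of struct fs_pin as declared in include/linux/fs_pin.h
 * (reconstructed for reference, not part of this listing):
 *
 *	struct fs_pin {
 *		wait_queue_head_t wait;   // pin_kill() sleepers wait here
 *		int done;                 // 0 = live, -1 = kill in progress, 1 = removed
 *		struct hlist_node s_list; // on sb->s_pins
 *		struct hlist_node m_list; // on mount->mnt_pins
 *		void (*kill)(struct fs_pin *);
 *	};
 *
 * init_fs_pin(p, kill) initializes the wait queue, the two list nodes
 * and the ->kill() callback.
 */

/* Hypothetical consumer embedding an fs_pin in its own object. */
struct my_obj {
	struct fs_pin	pin;
	struct rcu_head	rcu;
	/* ... per-object state ... */
};

static void my_obj_kill(struct fs_pin *pin)
{
	struct my_obj *obj = container_of(pin, struct my_obj, pin);

	pin_remove(pin);	/* unhook from both lists, wake pin_kill() waiters */
	kfree_rcu(obj, rcu);	/* concurrent pin_kill() may still hold an RCU reference */
}

static struct my_obj *my_obj_attach(struct vfsmount *mnt)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	init_fs_pin(&obj->pin, my_obj_kill);
	pin_insert(&obj->pin, mnt);	/* mnt_pin_kill()/group_pin_kill() will now find it */
	return obj;
}

When the mount is torn down, mnt_pin_kill() walks mnt_pins and invokes my_obj_kill() through pin_kill(); the kfree_rcu() defers the actual free past an RCU grace period, since another pin_kill() caller may have picked up the pointer under rcu_read_lock() just before pin_remove() unhooked it.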