cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

swait.c (3681B)


// SPDX-License-Identifier: GPL-2.0
/*
 * <linux/swait.h> (simple wait queues) implementation:
 */

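/* Initialize @q; the lock-class @key and @name are consumed by lockdep. */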
void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
			     struct lock_class_key *key)
{
	raw_spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);

/*
 * The wake_up_state() return value can be ignored here.
 *
 * If for some reason it were to return 0, the previously waiting task is
 * already running, so it will observe the condition as true (or already has).
 */
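/* The caller must hold q->lock. */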
void swake_up_locked(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);

/*
 * Wake up all waiters. This interface is exposed solely for completions,
 * not for general usage.
 *
 * It is intentionally different from swake_up_all() so that it can be used
 * from hard interrupt context and from interrupt-disabled regions.
 */
void swake_up_all_locked(struct swait_queue_head *q)
{
	while (!list_empty(&q->task_list))
		swake_up_locked(q);
}

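/* Wake at most one waiter; q->lock is taken with IRQs disabled, so this is safe from any context. */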
void swake_up_one(struct swait_queue_head *q)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	swake_up_locked(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up_one);

/*
 * Must not be called with IRQs disabled, since we have to be able to
 * release IRQs to guarantee a bounded lock hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	raw_spin_lock_irq(&q->lock);
	/* Splice all waiters onto a private list so they can be woken one by one. */
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		wake_up_state(curr->task, TASK_NORMAL);
		list_del_init(&curr->task_list);

		if (list_empty(&tmp))
			break;

		/* Drop and retake the lock between wakeups to bound IRQ-off time. */
		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);

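/* Queue @wait on @q unless it is already queued; the caller must hold q->lock. */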
void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	if (list_empty(&wait->task_list))
		list_add_tail(&wait->task_list, &q->task_list);
}

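/* Queue @wait and set the current task's state, all under q->lock; paired with finish_swait(). */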
void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__prepare_to_swait(q, wait);
	set_current_state(state);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait_exclusive);

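/*
 * As prepare_to_swait_exclusive(), but fail with -ERESTARTSYS instead of
 * queueing when a signal is already pending in an interruptible @state.
 */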
long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;
	long ret = 0;

	raw_spin_lock_irqsave(&q->lock, flags);
	if (signal_pending_state(state, current)) {
		/*
		 * See prepare_to_wait_event(); in short, a subsequent
		 * swake_up_one() must not see us.
		 */
		list_del_init(&wait->task_list);
		ret = -ERESTARTSYS;
	} else {
		__prepare_to_swait(q, wait);
		set_current_state(state);
	}
	raw_spin_unlock_irqrestore(&q->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_swait_event);

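/* As finish_swait(), but the caller must already hold q->lock. */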
void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
}

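/*
 * Restore TASK_RUNNING and dequeue @wait, taking q->lock only if the entry
 * may still be queued (list_empty_careful() permits the lockless fast path).
 */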
void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);
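
For context, here is a minimal sketch of the wait side built on the primitives above. It mirrors roughly what the swait_event_interruptible() macro in <linux/swait.h> expands to; the helper name wait_for_cond and its bool *cond parameter are hypothetical, not part of this file.

#include <linux/swait.h>
#include <linux/sched.h>

/*
 * Hypothetical waiter (illustration only): sleep until *cond becomes true,
 * or return -ERESTARTSYS if a signal arrives first.
 */
static int wait_for_cond(struct swait_queue_head *q, bool *cond)
{
	DECLARE_SWAITQUEUE(wait);
	long ret = 0;

	for (;;) {
		long err = prepare_to_swait_event(q, &wait, TASK_INTERRUPTIBLE);

		if (READ_ONCE(*cond))
			break;
		if (err) {		/* a signal is pending: -ERESTARTSYS */
			ret = err;
			break;
		}
		schedule();		/* woken by swake_up_one()/swake_up_all() */
	}
	finish_swait(q, &wait);
	return ret;
}

The matching waker sets the condition first and only then calls swake_up_one(q), so a waiter that observes the wakeup also observes the condition.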