cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

requestqueue.c (5102B)


// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2007 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "member.h"
#include "lock.h"
#include "dir.h"
#include "config.h"
#include "requestqueue.h"
#include "util.h"

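/*
 * A saved message.  The entry is allocated with extra trailing space so
 * that the variable-length body of the message follows "request".
 */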
struct rq_entry {
	struct list_head list;
	uint32_t recover_seq;
	int nodeid;
	struct dlm_message request;
};

/*
 * Requests received while the lockspace is in recovery get added to the
 * request queue and processed when recovery is complete.  This happens when
 * the lockspace is suspended on some nodes before it is on others, or the
 * lockspace is enabled on some while still suspended on others.
 */

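/*
 * Save a message received during recovery so dlm_process_requestqueue()
 * can deliver it once recovery completes and locking is re-enabled.
 */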
void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
{
	struct rq_entry *e;
	int length = le16_to_cpu(ms->m_header.h_length) -
		sizeof(struct dlm_message);

	e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
	if (!e) {
		log_print("dlm_add_requestqueue: out of memory len %d", length);
		return;
	}

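	/* record the low 32 bits of the recovery sequence under which
	   this message was saved; it is passed back to the lock code
	   when the message is finally delivered */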
	e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF;
	e->nodeid = nodeid;
	memcpy(&e->request, ms, le16_to_cpu(ms->m_header.h_length));

	atomic_inc(&ls->ls_requestqueue_cnt);
	mutex_lock(&ls->ls_requestqueue_mutex);
	list_add_tail(&e->list, &ls->ls_requestqueue);
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

/*
 * Called by dlm_recoverd to process normal messages saved while recovery was
 * happening.  Normal locking has been enabled before this is called.  dlm_recv,
 * upon receiving a message, will wait for all saved messages to be drained
 * here before processing the message it got.  If a new dlm_ls_stop() arrives
 * while we're processing these saved messages, it may block trying to suspend
 * dlm_recv if dlm_recv is waiting for us in dlm_wait_requestqueue.  In that
 * case, we don't abort since locking_stopped is still 0.  If dlm_recv is not
 * waiting for us, then this processing may be aborted due to locking_stopped.
 */

int dlm_process_requestqueue(struct dlm_ls *ls)
{
	struct rq_entry *e;
	struct dlm_message *ms;
	int error = 0;

	mutex_lock(&ls->ls_requestqueue_mutex);

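	/* deliver one saved message per iteration: take the head entry,
	   drop the mutex while the message is processed, then retake it
	   to unlink and free the entry */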
	for (;;) {
		if (list_empty(&ls->ls_requestqueue)) {
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = 0;
			break;
		}
		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
		mutex_unlock(&ls->ls_requestqueue_mutex);

		ms = &e->request;

		log_limit(ls, "dlm_process_requestqueue msg %d from %d "
			  "lkid %x remid %x result %d seq %u",
			  le32_to_cpu(ms->m_type),
			  le32_to_cpu(ms->m_header.h_nodeid),
			  le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
			  from_dlm_errno(le32_to_cpu(ms->m_result)),
			  e->recover_seq);

		dlm_receive_message_saved(ls, &e->request, e->recover_seq);

		mutex_lock(&ls->ls_requestqueue_mutex);
		list_del(&e->list);
		if (atomic_dec_and_test(&ls->ls_requestqueue_cnt))
			wake_up(&ls->ls_requestqueue_wait);
		kfree(e);

		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "process_requestqueue abort running");
			mutex_unlock(&ls->ls_requestqueue_mutex);
			error = -EINTR;
			break;
		}
		schedule();
	}

	return error;
}

/*
 * After recovery is done, locking is resumed and dlm_recoverd takes all the
 * saved requests and processes them as they would have been by dlm_recv.  At
 * the same time, dlm_recv will start receiving new requests from remote nodes.
 * We want to delay dlm_recv processing new requests until dlm_recoverd has
 * finished processing the old saved requests.  We don't check for locking
 * stopped here because dlm_ls_stop won't stop locking until it's suspended us
 * (dlm_recv).
 */

void dlm_wait_requestqueue(struct dlm_ls *ls)
{
	wait_event(ls->ls_requestqueue_wait,
		   atomic_read(&ls->ls_requestqueue_cnt) == 0);
}

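/*
 * Return 1 if a saved request should be dropped instead of processed:
 * the lockspace is being freed, the sending node has been removed, the
 * message is a directory operation, or the lockspace runs without a
 * directory.
 */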
static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
	__le32 type = ms->m_type;

	/* the ls is being cleaned up and freed by release_lockspace */
	if (!atomic_read(&ls->ls_count))
		return 1;

	if (dlm_is_removed(ls, nodeid))
		return 1;

	/* directory operations are always purged because the directory is
	   always rebuilt during recovery and the lookups resent */

	if (type == cpu_to_le32(DLM_MSG_REMOVE) ||
	    type == cpu_to_le32(DLM_MSG_LOOKUP) ||
	    type == cpu_to_le32(DLM_MSG_LOOKUP_REPLY))
		return 1;

	if (!dlm_no_directory(ls))
		return 0;

	return 1;
}

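/*
 * Drop the saved requests that purge_request() says are no longer
 * valid, waking any waiter once the queue count reaches zero.
 */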
void dlm_purge_requestqueue(struct dlm_ls *ls)
{
	struct dlm_message *ms;
	struct rq_entry *e, *safe;

	mutex_lock(&ls->ls_requestqueue_mutex);
	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
		ms = &e->request;

		if (purge_request(ls, ms, e->nodeid)) {
			list_del(&e->list);
			if (atomic_dec_and_test(&ls->ls_requestqueue_cnt))
				wake_up(&ls->ls_requestqueue_wait);
			kfree(e);
		}
	}
	mutex_unlock(&ls->ls_requestqueue_mutex);
}

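How these entry points fit together, as a minimal sketch derived only from the
comments in this file: dlm_recv saves messages while locking is stopped and
waits for the queue to drain before handling new ones, while dlm_recoverd
purges stale entries and processes the rest once recovery completes. The
wrapper functions below (recv_side_sketch, recoverd_side_sketch) are
illustrative names, not code from this repository:

/* dlm_recv side (sketch): during recovery the message is saved;
   otherwise wait for dlm_recoverd to drain the saved messages
   before processing the new one. */
static void recv_side_sketch(struct dlm_ls *ls, int nodeid,
			     struct dlm_message *ms)
{
	if (dlm_locking_stopped(ls)) {
		dlm_add_requestqueue(ls, nodeid, ms);
		return;
	}
	dlm_wait_requestqueue(ls);
	/* ... process ms normally ... */
}

/* dlm_recoverd side (sketch): after recovery, drop stale saved
   requests, then deliver the rest; -EINTR means a new recovery
   interrupted the drain and the remaining entries stay queued. */
static void recoverd_side_sketch(struct dlm_ls *ls)
{
	dlm_purge_requestqueue(ls);

	if (dlm_process_requestqueue(ls) == -EINTR)
		log_debug(ls, "requestqueue drain interrupted");
}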