cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

vmpressure.c (14186B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux VM pressure
 *
 * Copyright 2012 Linaro Ltd.
 *		  Anton Vorontsov <anton.vorontsov@linaro.org>
 *
 * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
 * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>

/*
 * The window size (vmpressure_win) is the number of scanned pages before
 * we try to analyze the scanned/reclaimed ratio. The window therefore acts
 * as a rate-limit tunable for the "low" level notification, and also for
 * averaging the ratio for the medium/critical levels. Small window sizes
 * can cause a lot of false positives, while too big a window will delay
 * the notifications.
 *
 * As the vmscan reclaimer logic works with chunks which are multiples of
 * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well.
 *
 * TODO: Make the window size depend on machine size, as we do for vmstat
 * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
 */
static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;

/*
 * These thresholds are used when we account memory pressure through
 * scanned/reclaimed ratio. The current values were chosen empirically. In
 * essence, they are percentages: the higher the value, the more
 * unsuccessful reclaims there were.
 */
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;

/*
 * When there are too few pages left to scan, vmpressure() may miss the
 * critical pressure as the number of pages will be less than the "window
 * size". However, in that case the vmscan priority will rise quickly, as
 * the reclaimer will try to scan the LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). The current value of vmpressure_level_critical_prio was chosen
 * empirically, but the number, in essence, means that we consider the
 * level critical when the scanning depth is ~10% of the lru size (vmscan
 * scans 'lru_size >> prio' pages, so it is actually 12.5%, or one
 * eighth).
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);
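/*
 * (A worked check of the arithmetic above: ilog2(100 / 10) = ilog2(10) = 3,
 * so any prio <= 3 is treated as critical; at prio 3 vmscan scans at least
 * lru_size >> 3, i.e. one eighth of the LRU per pass.)
 */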

static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
	return container_of(work, struct vmpressure, work);
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
	struct mem_cgroup *memcg = vmpressure_to_memcg(vmpr);

	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return memcg_to_vmpressure(memcg);
}

enum vmpressure_levels {
	VMPRESSURE_LOW = 0,
	VMPRESSURE_MEDIUM,
	VMPRESSURE_CRITICAL,
	VMPRESSURE_NUM_LEVELS,
};

enum vmpressure_modes {
	VMPRESSURE_NO_PASSTHROUGH = 0,
	VMPRESSURE_HIERARCHY,
	VMPRESSURE_LOCAL,
	VMPRESSURE_NUM_MODES,
};

static const char * const vmpressure_str_levels[] = {
	[VMPRESSURE_LOW] = "low",
	[VMPRESSURE_MEDIUM] = "medium",
	[VMPRESSURE_CRITICAL] = "critical",
};

static const char * const vmpressure_str_modes[] = {
	[VMPRESSURE_NO_PASSTHROUGH] = "default",
	[VMPRESSURE_HIERARCHY] = "hierarchy",
	[VMPRESSURE_LOCAL] = "local",
};

static enum vmpressure_levels vmpressure_level(unsigned long pressure)
{
	if (pressure >= vmpressure_level_critical)
		return VMPRESSURE_CRITICAL;
	else if (pressure >= vmpressure_level_med)
		return VMPRESSURE_MEDIUM;
	return VMPRESSURE_LOW;
}

static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
						    unsigned long reclaimed)
{
	unsigned long scale = scanned + reclaimed;
	unsigned long pressure = 0;

	/*
	 * reclaimed can be greater than scanned for things such as reclaimed
	 * slab pages. shrink_node() just adds reclaimed pages without a
	 * related increment to scanned pages.
	 */
	if (reclaimed >= scanned)
		goto out;
	/*
	 * We calculate the ratio (in percent) of how many pages were
	 * scanned vs. reclaimed in a given time frame (window). Note that
	 * time is in VM reclaimer's "ticks", i.e. the number of pages
	 * scanned. This makes it possible to set the desired reaction time
	 * and serves as a rate limit.
	 */
	pressure = scale - (reclaimed * scale / scanned);
	pressure = pressure * 100 / scale;
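	/*
	 * Up to integer truncation, the two steps above reduce to
	 * pressure = 100 * (scanned - reclaimed) / scanned. Worked example:
	 * scanned = 512, reclaimed = 128 gives scale = 640, then
	 * pressure = 640 - 128 * 640 / 512 = 480, and
	 * 480 * 100 / 640 = 75, i.e. VMPRESSURE_MEDIUM.
	 */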

out:
	pr_debug("%s: %3lu  (s: %lu  r: %lu)\n", __func__, pressure,
		 scanned, reclaimed);

	return vmpressure_level(pressure);
}

struct vmpressure_event {
	struct eventfd_ctx *efd;
	enum vmpressure_levels level;
	enum vmpressure_modes mode;
	struct list_head node;
};

static bool vmpressure_event(struct vmpressure *vmpr,
			     const enum vmpressure_levels level,
			     bool ancestor, bool signalled)
{
	struct vmpressure_event *ev;
	bool ret = false;

	mutex_lock(&vmpr->events_lock);
	list_for_each_entry(ev, &vmpr->events, node) {
		if (ancestor && ev->mode == VMPRESSURE_LOCAL)
			continue;
		if (signalled && ev->mode == VMPRESSURE_NO_PASSTHROUGH)
			continue;
		if (level < ev->level)
			continue;
		eventfd_signal(ev->efd, 1);
		ret = true;
	}
	mutex_unlock(&vmpr->events_lock);

	return ret;
}

static void vmpressure_work_fn(struct work_struct *work)
{
	struct vmpressure *vmpr = work_to_vmpressure(work);
	unsigned long scanned;
	unsigned long reclaimed;
	enum vmpressure_levels level;
	bool ancestor = false;
	bool signalled = false;

	spin_lock(&vmpr->sr_lock);
	/*
	 * Several contexts might be calling vmpressure(), so it is
	 * possible that the work was rescheduled again before the old
	 * work context cleared the counters. In that case we will run
	 * just after the old work returns, but then scanned might be zero
	 * here. No need for any locks here since we don't care if
	 * vmpr->reclaimed is in sync.
	 */
	scanned = vmpr->tree_scanned;
	if (!scanned) {
		spin_unlock(&vmpr->sr_lock);
		return;
	}

	reclaimed = vmpr->tree_reclaimed;
	vmpr->tree_scanned = 0;
	vmpr->tree_reclaimed = 0;
	spin_unlock(&vmpr->sr_lock);

	level = vmpressure_calc_level(scanned, reclaimed);

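	/*
	 * Deliver the event for this memcg, then walk up the hierarchy so
	 * ancestors can be notified too. Each listener's mode decides
	 * whether it reacts to pressure that originated in a descendant
	 * (VMPRESSURE_LOCAL skips it) or that a descendant listener has
	 * already consumed (VMPRESSURE_NO_PASSTHROUGH skips it).
	 */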
	do {
		if (vmpressure_event(vmpr, level, ancestor, signalled))
			signalled = true;
		ancestor = true;
	} while ((vmpr = vmpressure_parent(vmpr)));
}

/**
 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
 * @gfp:	reclaimer's gfp mask
 * @memcg:	cgroup memory controller handle
 * @tree:	legacy subtree mode
 * @scanned:	number of pages scanned
 * @reclaimed:	number of pages reclaimed
 *
 * This function should be called from the vmscan reclaim path to account
 * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
 * pressure index is then further refined and averaged over time.
 *
 * If @tree is set, vmpressure is in traditional userspace reporting
 * mode: @memcg is considered the pressure root and userspace is
 * notified of the entire subtree's reclaim efficiency.
 *
 * If @tree is not set, reclaim efficiency is recorded for @memcg, and
 * only in-kernel users are notified.
 *
 * This function does not return any value.
 */
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
		unsigned long scanned, unsigned long reclaimed)
{
	struct vmpressure *vmpr;

	if (mem_cgroup_disabled())
		return;

	vmpr = memcg_to_vmpressure(memcg);

	/*
	 * Here we only want to account pressure that userland is able to
	 * help us with. For example, suppose that DMA zone is under
	 * pressure; if we notify userland about that kind of pressure,
	 * then it will be mostly a waste as it will trigger unnecessary
	 * freeing of memory by userland (since userland is more likely to
	 * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
	 * is why we include only movable, highmem and FS/IO pages.
	 * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
	 * we account it too.
	 */
	if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
		return;

	/*
	 * If we got here with no pages scanned, then that is an indicator
	 * that the reclaimer was unable to find any shrinkable LRUs at the
	 * current scanning depth. That does not mean we should report
	 * critical pressure yet: if the scanning priority (scanning
	 * depth) goes too high (deep), we will be notified through
	 * vmpressure_prio(). But so far, keep calm.
	 */
	if (!scanned)
		return;

	if (tree) {
		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->tree_scanned += scanned;
		vmpr->tree_reclaimed += reclaimed;
		spin_unlock(&vmpr->sr_lock);

		if (scanned < vmpressure_win)
			return;
		schedule_work(&vmpr->work);
	} else {
		enum vmpressure_levels level;

		/* For now, no users for root-level efficiency */
		if (!memcg || mem_cgroup_is_root(memcg))
			return;

		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->scanned += scanned;
		reclaimed = vmpr->reclaimed += reclaimed;
		if (scanned < vmpressure_win) {
			spin_unlock(&vmpr->sr_lock);
			return;
		}
		vmpr->scanned = vmpr->reclaimed = 0;
		spin_unlock(&vmpr->sr_lock);

		level = vmpressure_calc_level(scanned, reclaimed);

		if (level > VMPRESSURE_LOW) {
			/*
			 * Let the socket buffer allocator know that
			 * we are having trouble reclaiming LRU pages.
			 *
			 * For hysteresis, keep the pressure state
			 * asserted for a second, during which subsequent
			 * pressure events can occur.
			 */
			WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
		}
	}
}

/**
 * vmpressure_prio() - Account memory pressure through reclaimer priority level
 * @gfp:	reclaimer's gfp mask
 * @memcg:	cgroup memory controller handle
 * @prio:	reclaimer's priority
 *
 * This function should be called from the reclaim path every time the
 * vmscan reclaim priority (scanning depth) changes.
 *
 * This function does not return any value.
 */
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
{
	/*
	 * We only use prio for accounting the critical level. For more info
	 * see the comment for the vmpressure_level_critical_prio variable
	 * above.
	 */
	if (prio > vmpressure_level_critical_prio)
		return;

	/*
	 * OK, the prio is below the threshold; update the vmpressure
	 * information before the shrinker dives into a long, deep vmscan
	 * pass. Passing scanned = vmpressure_win and reclaimed = 0 to
	 * vmpressure() basically means that we signal the 'critical' level.
	 */
	vmpressure(gfp, memcg, true, vmpressure_win, 0);
}

#define MAX_VMPRESSURE_ARGS_LEN	(strlen("critical") + strlen("hierarchy") + 2)
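/*
 * (The bound above works out to 8 + 9 + 2 = 19 bytes: the longest level
 * name, the longest mode name, the separating comma and a trailing NUL.)
 */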

/**
 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
 * @memcg:	memcg that is interested in vmpressure notifications
 * @eventfd:	eventfd context to link notifications with
 * @args:	event arguments (pressure level threshold, optional mode)
 *
 * This function associates eventfd context with the vmpressure
 * infrastructure, so that the notifications will be delivered to the
 * @eventfd. The @args parameter is a comma-delimited string that denotes a
 * pressure level threshold (one of vmpressure_str_levels, i.e. "low", "medium",
 * or "critical") and an optional mode (one of vmpressure_str_modes, i.e.
 * "hierarchy" or "local").
 *
 * To be used as memcg event method.
 *
 * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
 * not be parsed.
 */
int vmpressure_register_event(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;
	enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
	enum vmpressure_levels level;
	char *spec, *spec_orig;
	char *token;
	int ret = 0;

	spec_orig = spec = kstrndup(args, MAX_VMPRESSURE_ARGS_LEN, GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Find required level */
	token = strsep(&spec, ",");
	ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
	if (ret < 0)
		goto out;
	level = ret;

	/* Find optional mode */
	token = strsep(&spec, ",");
	if (token) {
		ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
		if (ret < 0)
			goto out;
		mode = ret;
	}

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		ret = -ENOMEM;
		goto out;
	}

	ev->efd = eventfd;
	ev->level = level;
	ev->mode = mode;

	mutex_lock(&vmpr->events_lock);
	list_add(&ev->node, &vmpr->events);
	mutex_unlock(&vmpr->events_lock);
	ret = 0;
out:
	kfree(spec_orig);
	return ret;
}

/**
 * vmpressure_unregister_event() - Unbind eventfd from vmpressure
 * @memcg:	memcg handle
 * @eventfd:	eventfd context that was used to link vmpressure with the @memcg
 *
 * This function does internal manipulations to detach the @eventfd from
 * the vmpressure notifications, and then frees internal resources
 * associated with the @eventfd (but the @eventfd itself is not freed).
 *
 * To be used as memcg event method.
 */
void vmpressure_unregister_event(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;

	mutex_lock(&vmpr->events_lock);
	list_for_each_entry(ev, &vmpr->events, node) {
		if (ev->efd != eventfd)
			continue;
		list_del(&ev->node);
		kfree(ev);
		break;
	}
	mutex_unlock(&vmpr->events_lock);
}

/**
 * vmpressure_init() - Initialize vmpressure control structure
 * @vmpr:	Structure to be initialized
 *
 * This function should be called on every allocated vmpressure structure
 * before any usage.
 */
void vmpressure_init(struct vmpressure *vmpr)
{
	spin_lock_init(&vmpr->sr_lock);
	mutex_init(&vmpr->events_lock);
	INIT_LIST_HEAD(&vmpr->events);
	INIT_WORK(&vmpr->work, vmpressure_work_fn);
}

/**
 * vmpressure_cleanup() - Shut down vmpressure control structure
 * @vmpr:	Structure to be cleaned up
 *
 * This function should be called before the structure in which it is
 * embedded is cleaned up.
 */
void vmpressure_cleanup(struct vmpressure *vmpr)
{
	/*
	 * Make sure there is no pending work before the eventfd
	 * infrastructure goes away.
	 */
	flush_work(&vmpr->work);
}
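
For reference, a minimal userspace sketch of how these notifications are
consumed through the cgroup-v1 memcg eventfd interface (write
"<event_fd> <pressure_level_fd> <level[,mode]>" to cgroup.event_control).
The mount point and cgroup name below are illustrative assumptions, not
part of this file.

/* Sketch: register for "medium,hierarchy" vmpressure events and wait. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <inttypes.h>
#include <sys/eventfd.h>

int main(void)
{
	/* Illustrative cgroup-v1 paths; adjust to the actual hierarchy. */
	const char *grp = "/sys/fs/cgroup/memory/mygroup";
	char path[256], cmd[64];
	uint64_t cnt;
	int efd, pfd, cfd;

	efd = eventfd(0, 0);	/* target of eventfd_signal() above */
	snprintf(path, sizeof(path), "%s/memory.pressure_level", grp);
	pfd = open(path, O_RDONLY);
	snprintf(path, sizeof(path), "%s/cgroup.event_control", grp);
	cfd = open(path, O_WRONLY);
	if (efd < 0 || pfd < 0 || cfd < 0) {
		perror("setup");
		return 1;
	}

	/* Format: "<event_fd> <pressure_level_fd> <level[,mode]>" */
	snprintf(cmd, sizeof(cmd), "%d %d medium,hierarchy", efd, pfd);
	if (write(cfd, cmd, strlen(cmd)) < 0) {
		perror("cgroup.event_control");
		return 1;
	}

	/* Each signalled event bumps the eventfd counter; read() blocks. */
	while (read(efd, &cnt, sizeof(cnt)) == sizeof(cnt))
		printf("vmpressure event (count=%" PRIu64 ")\n", cnt);
	return 0;
}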