cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

percpu-stats.c (5988B)


// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu-stats.c
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * Prints statistics about the percpu allocator and backing chunks.
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/sort.h>
#include <linux/vmalloc.h>

#include "percpu-internal.h"

#define P(X, Y) \
	seq_printf(m, "  %-20s: %12lld\n", X, (long long int)Y)
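
/*
 * For example, P("nr_alloc", 3) emits a line of the form:
 *
 *   nr_alloc            :            3
 */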

struct percpu_stats pcpu_stats;
struct pcpu_alloc_info pcpu_stats_ai;

static int cmpint(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

/*
 * Iterates over all chunks to find the max nr_alloc entries.
 */
static int find_max_nr_alloc(void)
{
	struct pcpu_chunk *chunk;
	int slot, max_nr_alloc;

	max_nr_alloc = 0;
	for (slot = 0; slot < pcpu_nr_slots; slot++)
		list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list)
			max_nr_alloc = max(max_nr_alloc, chunk->nr_alloc);

	return max_nr_alloc;
}

/*
 * Prints out chunk state. Fragmentation is considered from the
 * beginning of the chunk to the last allocation.
 *
 * All statistics are in bytes unless stated otherwise.
 */
static void chunk_map_stats(struct seq_file *m, struct pcpu_chunk *chunk,
			    int *buffer)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int i, last_alloc, as_len, start, end;
	int *alloc_sizes, *p;
	/* statistics */
	int sum_frag = 0, max_frag = 0;
	int cur_min_alloc = 0, cur_med_alloc = 0, cur_max_alloc = 0;

	alloc_sizes = buffer;

	/*
	 * find_last_bit() returns the bitmap size if no bits are set.
	 * Therefore, we must determine whether it failed to find anything
	 * and set the appropriate value (0 for an empty chunk).
	 */
	last_alloc = find_last_bit(chunk->alloc_map,
				   pcpu_chunk_map_bits(chunk) -
				   chunk->end_offset / PCPU_MIN_ALLOC_SIZE - 1);
	last_alloc = test_bit(last_alloc, chunk->alloc_map) ?
		     last_alloc + 1 : 0;

	as_len = 0;
	start = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;

	/*
	 * If a bit is set in the allocation map, the bound_map identifies
	 * where the allocation ends.  If the bit is not set, the bound_map
	 * does not identify free areas, as it is only kept accurate on
	 * allocation, not free.
	 *
	 * Positive values are allocations and negative values are free
	 * fragments.
	 */
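	/*
	 * Illustration (one bit per PCPU_MIN_ALLOC_SIZE unit):
	 *
	 *   alloc_map: 1 1 1 0 0 1 ...
	 *   bound_map: 1 0 0 1 0 1 ...
	 *
	 * Units 0-2 form one allocation whose end is marked by bound_map
	 * bit 3, units 3-4 are a free fragment, and a new allocation
	 * begins at unit 5.
	 */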
	while (start < last_alloc) {
		if (test_bit(start, chunk->alloc_map)) {
			end = find_next_bit(chunk->bound_map, last_alloc,
					    start + 1);
			alloc_sizes[as_len] = 1;
		} else {
			end = find_next_bit(chunk->alloc_map, last_alloc,
					    start + 1);
			alloc_sizes[as_len] = -1;
		}

		alloc_sizes[as_len++] *= (end - start) * PCPU_MIN_ALLOC_SIZE;

		start = end;
	}

	/*
	 * The negative values are free fragments, so sorting places the
	 * free fragments at the beginning, largest fragment first.
	 */
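	/*
	 * E.g. sizes {8, -4, 32, -16, 4} sort to {-16, -4, 4, 8, 32}:
	 * the 16- and 4-byte free fragments come first, followed by the
	 * allocations in ascending order of size.
	 */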
	if (as_len > 0) {
		sort(alloc_sizes, as_len, sizeof(int), cmpint, NULL);

		/* iterate through the unallocated fragments */
		for (i = 0, p = alloc_sizes; *p < 0 && i < as_len; i++, p++) {
			sum_frag -= *p;
			max_frag = max(max_frag, -1 * (*p));
		}

		cur_min_alloc = alloc_sizes[i];
		cur_med_alloc = alloc_sizes[(i + as_len - 1) / 2];
		cur_max_alloc = alloc_sizes[as_len - 1];
	}

	P("nr_alloc", chunk->nr_alloc);
	P("max_alloc_size", chunk->max_alloc_size);
	P("empty_pop_pages", chunk->nr_empty_pop_pages);
	P("first_bit", chunk_md->first_free);
	P("free_bytes", chunk->free_bytes);
	P("contig_bytes", chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
	P("sum_frag", sum_frag);
	P("max_frag", max_frag);
	P("cur_min_alloc", cur_min_alloc);
	P("cur_med_alloc", cur_med_alloc);
	P("cur_max_alloc", cur_max_alloc);
	seq_putc(m, '\n');
}

static int percpu_stats_show(struct seq_file *m, void *v)
{
	struct pcpu_chunk *chunk;
	int slot, max_nr_alloc;
	int *buffer;

alloc_buffer:
	spin_lock_irq(&pcpu_lock);
	max_nr_alloc = find_max_nr_alloc();
	spin_unlock_irq(&pcpu_lock);

	/* there can be at most this many free and allocated fragments */
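	/* (each allocation may be preceded by at most one free fragment) */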
	buffer = vmalloc_array(2 * max_nr_alloc + 1, sizeof(int));
	if (!buffer)
		return -ENOMEM;

	spin_lock_irq(&pcpu_lock);

	/*
	 * If the buffer allocated earlier is too small, i.e. allocations
	 * raced in while pcpu_lock was dropped, retry with a larger one.
	 */
	if (max_nr_alloc < find_max_nr_alloc()) {
		spin_unlock_irq(&pcpu_lock);
		vfree(buffer);
		goto alloc_buffer;
	}

#define PL(X)								\
	seq_printf(m, "  %-20s: %12lld\n", #X, (long long int)pcpu_stats_ai.X)

	seq_printf(m,
			"Percpu Memory Statistics\n"
			"Allocation Info:\n"
			"----------------------------------------\n");
	PL(unit_size);
	PL(static_size);
	PL(reserved_size);
	PL(dyn_size);
	PL(atom_size);
	PL(alloc_size);
	seq_putc(m, '\n');

#undef PL

#define PU(X) \
	seq_printf(m, "  %-20s: %12llu\n", #X, (unsigned long long)pcpu_stats.X)

	seq_printf(m,
			"Global Stats:\n"
			"----------------------------------------\n");
	PU(nr_alloc);
	PU(nr_dealloc);
	PU(nr_cur_alloc);
	PU(nr_max_alloc);
	PU(nr_chunks);
	PU(nr_max_chunks);
	PU(min_alloc_size);
	PU(max_alloc_size);
	P("empty_pop_pages", pcpu_nr_empty_pop_pages);
	seq_putc(m, '\n');

#undef PU

	seq_printf(m,
			"Per Chunk Stats:\n"
			"----------------------------------------\n");

	if (pcpu_reserved_chunk) {
		seq_puts(m, "Chunk: <- Reserved Chunk\n");
		chunk_map_stats(m, pcpu_reserved_chunk, buffer);
	}

	for (slot = 0; slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
			if (chunk == pcpu_first_chunk)
				seq_puts(m, "Chunk: <- First Chunk\n");
			else if (slot == pcpu_to_depopulate_slot)
				seq_puts(m, "Chunk (to_depopulate):\n");
			else if (slot == pcpu_sidelined_slot)
				seq_puts(m, "Chunk (sidelined):\n");
			else
				seq_puts(m, "Chunk:\n");
			chunk_map_stats(m, chunk, buffer);
		}
	}

	spin_unlock_irq(&pcpu_lock);

	vfree(buffer);

	return 0;
}
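
/*
 * DEFINE_SHOW_ATTRIBUTE(percpu_stats) generates percpu_stats_open() and
 * percpu_stats_fops around percpu_stats_show() via single_open(), for
 * the read-only debugfs file registered below.
 */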
DEFINE_SHOW_ATTRIBUTE(percpu_stats);

static int __init init_percpu_stats_debugfs(void)
{
	debugfs_create_file("percpu_stats", 0444, NULL, NULL,
			&percpu_stats_fops);

	return 0;
}

late_initcall(init_percpu_stats_debugfs);