cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

percpu.c (104959B)


      1// SPDX-License-Identifier: GPL-2.0-only
      2/*
      3 * mm/percpu.c - percpu memory allocator
      4 *
      5 * Copyright (C) 2009		SUSE Linux Products GmbH
      6 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
      7 *
      8 * Copyright (C) 2017		Facebook Inc.
      9 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
     10 *
     11 * The percpu allocator handles both static and dynamic areas.  Percpu
     12 * areas are allocated in chunks which are divided into units.  There is
     13 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
     14 * based on NUMA properties of the machine.
     15 *
     16 *  c0                           c1                         c2
     17 *  -------------------          -------------------        ------------
     18 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
     19 *  -------------------  ......  -------------------  ....  ------------
     20 *
     21 * Allocation is done by offsets into a unit's address space.  I.e., an
     22 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
     23 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
     24 * and even sparse.  Access is handled by configuring percpu base
     25 * registers according to the cpu to unit mappings and offsetting the
     26 * base address using pcpu_unit_size.
     27 *
     28 * There is special consideration for the first chunk which must handle
     29 * the static percpu variables in the kernel image as allocation services
     30 * are not online yet.  In short, the first chunk is structured like so:
     31 *
     32 *                  <Static | [Reserved] | Dynamic>
     33 *
     34 * The static data is copied from the original section managed by the
     35 * linker.  The reserved section, if non-zero, primarily manages static
     36 * percpu variables from kernel modules.  Finally, the dynamic section
     37 * takes care of normal allocations.
     38 *
     39 * The allocator organizes chunks into lists according to free size and
     40 * memcg-awareness.  To make a percpu allocation memcg-aware the __GFP_ACCOUNT
     41 * flag should be passed.  All memcg-aware allocations are sharing one set
     42 * of chunks and all unaccounted allocations and allocations performed
     43 * by processes belonging to the root memory cgroup are using the second set.
     44 *
     45 * The allocator tries to allocate from the fullest chunk first. Each chunk
     46 * is managed by a bitmap with metadata blocks.  The allocation map is updated
     47 * on every allocation and free to reflect the current state while the boundary
     48 * map is only updated on allocation.  Each metadata block contains
     49 * information to help mitigate the need to iterate over large portions
     50 * of the bitmap.  The reverse mapping from page to chunk is stored in
     51 * the page's index.  Lastly, units are lazily backed and grow in unison.
     52 *
     53 * There is a unique conversion that goes on here between bytes and bits.
     54 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
     55 * tracks the number of pages it is responsible for in nr_pages.  Helper
     56 * functions are used to convert between bytes, bits, and blocks.
     57 * All hints are managed in bits unless explicitly stated.
     58 *
     59 * To use this allocator, arch code should do the following:
     60 *
     61 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
     62 *   regular address to percpu pointer and back if they need to be
     63 *   different from the default
     64 *
     65 * - use pcpu_setup_first_chunk() during percpu area initialization to
     66 *   setup the first chunk containing the kernel static percpu area
     67 */
     68
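       /*
        * A minimal consumer-side sketch of the interfaces this allocator backs:
        * DEFINE_PER_CPU() carves data out of the static area while
        * alloc_percpu() draws from the dynamic area.  The names (my_hits,
        * counters, ...) are illustrative and error handling is elided.  Note
        * that alloc_percpu() already returns zeroed memory; the loop below
        * only demonstrates per_cpu_ptr().
        *
        *	DEFINE_PER_CPU(unsigned long, my_hits);
        *
        *	static void record_hit(void)
        *	{
        *		this_cpu_inc(my_hits);
        *	}
        *
        *	static int __percpu *counters;
        *
        *	static int counters_init(void)
        *	{
        *		int cpu;
        *
        *		counters = alloc_percpu(int);
        *		if (!counters)
        *			return -ENOMEM;
        *		for_each_possible_cpu(cpu)
        *			*per_cpu_ptr(counters, cpu) = 0;
        *		return 0;
        *	}
        *
        *	static void counters_exit(void)
        *	{
        *		free_percpu(counters);
        *	}
        */
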
     69#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
     70
     71#include <linux/bitmap.h>
     72#include <linux/cpumask.h>
     73#include <linux/memblock.h>
     74#include <linux/err.h>
     75#include <linux/lcm.h>
     76#include <linux/list.h>
     77#include <linux/log2.h>
     78#include <linux/mm.h>
     79#include <linux/module.h>
     80#include <linux/mutex.h>
     81#include <linux/percpu.h>
     82#include <linux/pfn.h>
     83#include <linux/slab.h>
     84#include <linux/spinlock.h>
     85#include <linux/vmalloc.h>
     86#include <linux/workqueue.h>
     87#include <linux/kmemleak.h>
     88#include <linux/sched.h>
     89#include <linux/sched/mm.h>
     90#include <linux/memcontrol.h>
     91
     92#include <asm/cacheflush.h>
     93#include <asm/sections.h>
     94#include <asm/tlbflush.h>
     95#include <asm/io.h>
     96
     97#define CREATE_TRACE_POINTS
     98#include <trace/events/percpu.h>
     99
    100#include "percpu-internal.h"
    101
    102/*
    103 * The slots are sorted by the size of the biggest contiguous free area.
    104 * 1-31 bytes share the same slot.
    105 */
    106#define PCPU_SLOT_BASE_SHIFT		5
    107/* chunks in slots below this are subject to being sidelined on failed alloc */
    108#define PCPU_SLOT_FAIL_THRESHOLD	3
    109
    110#define PCPU_EMPTY_POP_PAGES_LOW	2
    111#define PCPU_EMPTY_POP_PAGES_HIGH	4
    112
    113#ifdef CONFIG_SMP
    114/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
    115#ifndef __addr_to_pcpu_ptr
    116#define __addr_to_pcpu_ptr(addr)					\
    117	(void __percpu *)((unsigned long)(addr) -			\
    118			  (unsigned long)pcpu_base_addr	+		\
    119			  (unsigned long)__per_cpu_start)
    120#endif
    121#ifndef __pcpu_ptr_to_addr
    122#define __pcpu_ptr_to_addr(ptr)						\
    123	(void __force *)((unsigned long)(ptr) +				\
    124			 (unsigned long)pcpu_base_addr -		\
    125			 (unsigned long)__per_cpu_start)
    126#endif
    127#else	/* CONFIG_SMP */
    128/* on UP, it's always identity mapped */
    129#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
    130#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
    131#endif	/* CONFIG_SMP */
    132
    133static int pcpu_unit_pages __ro_after_init;
    134static int pcpu_unit_size __ro_after_init;
    135static int pcpu_nr_units __ro_after_init;
    136static int pcpu_atom_size __ro_after_init;
    137int pcpu_nr_slots __ro_after_init;
    138static int pcpu_free_slot __ro_after_init;
    139int pcpu_sidelined_slot __ro_after_init;
    140int pcpu_to_depopulate_slot __ro_after_init;
    141static size_t pcpu_chunk_struct_size __ro_after_init;
    142
    143/* cpus with the lowest and highest unit addresses */
    144static unsigned int pcpu_low_unit_cpu __ro_after_init;
    145static unsigned int pcpu_high_unit_cpu __ro_after_init;
    146
    147/* the address of the first chunk which starts with the kernel static area */
    148void *pcpu_base_addr __ro_after_init;
    149
    150static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
    151const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */
    152
    153/* group information, used for vm allocation */
    154static int pcpu_nr_groups __ro_after_init;
    155static const unsigned long *pcpu_group_offsets __ro_after_init;
    156static const size_t *pcpu_group_sizes __ro_after_init;
    157
    158/*
    159 * The first chunk which always exists.  Note that unlike other
    160 * chunks, this one can be allocated and mapped in several different
    161 * ways and thus often doesn't live in the vmalloc area.
    162 */
    163struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
    164
    165/*
    166 * Optional reserved chunk.  This chunk reserves part of the first
    167 * chunk and serves it for reserved allocations.  When the reserved
    168 * region doesn't exist, the following variable is NULL.
    169 */
    170struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
    171
    172DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
    173static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
    174
    175struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
    176
    177/* chunks which need their map areas extended, protected by pcpu_lock */
    178static LIST_HEAD(pcpu_map_extend_chunks);
    179
    180/*
    181 * The number of empty populated pages, protected by pcpu_lock.
    182 * The reserved chunk doesn't contribute to the count.
    183 */
    184int pcpu_nr_empty_pop_pages;
    185
    186/*
    187 * The number of populated pages in use by the allocator, protected by
    188 * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
    189 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
    190 * and increments/decrements this count by 1).
    191 */
    192static unsigned long pcpu_nr_populated;
    193
    194/*
    195 * Balance work is used to populate or destroy chunks asynchronously.  We
    196 * try to keep the number of populated free pages between
    197 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
    198 * empty chunk.
    199 */
    200static void pcpu_balance_workfn(struct work_struct *work);
    201static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
    202static bool pcpu_async_enabled __read_mostly;
    203static bool pcpu_atomic_alloc_failed;
    204
    205static void pcpu_schedule_balance_work(void)
    206{
    207	if (pcpu_async_enabled)
    208		schedule_work(&pcpu_balance_work);
    209}
    210
    211/**
    212 * pcpu_addr_in_chunk - check if the address is served from this chunk
    213 * @chunk: chunk of interest
    214 * @addr: percpu address
    215 *
    216 * RETURNS:
    217 * True if the address is served from this chunk.
    218 */
    219static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
    220{
    221	void *start_addr, *end_addr;
    222
    223	if (!chunk)
    224		return false;
    225
    226	start_addr = chunk->base_addr + chunk->start_offset;
    227	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
    228		   chunk->end_offset;
    229
    230	return addr >= start_addr && addr < end_addr;
    231}
    232
    233static int __pcpu_size_to_slot(int size)
    234{
    235	int highbit = fls(size);	/* size is in bytes */
    236	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
    237}
    238
    239static int pcpu_size_to_slot(int size)
    240{
    241	if (size == pcpu_unit_size)
    242		return pcpu_free_slot;
    243	return __pcpu_size_to_slot(size);
    244}
    245
    246static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
    247{
    248	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
    249
    250	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
    251	    chunk_md->contig_hint == 0)
    252		return 0;
    253
    254	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
    255}
    256
    257/* set the pointer to a chunk in a page struct */
    258static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
    259{
    260	page->index = (unsigned long)pcpu;
    261}
    262
    263/* obtain pointer to a chunk from a page struct */
    264static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
    265{
    266	return (struct pcpu_chunk *)page->index;
    267}
    268
    269static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
    270{
    271	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
    272}
    273
    274static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
    275{
    276	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
    277}
    278
    279static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
    280				     unsigned int cpu, int page_idx)
    281{
    282	return (unsigned long)chunk->base_addr +
    283	       pcpu_unit_page_offset(cpu, page_idx);
    284}
    285
    286/*
    287 * The following are helper functions to help access bitmaps and convert
    288 * between bitmap offsets to address offsets.
    289 */
    290static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
    291{
    292	return chunk->alloc_map +
    293	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
    294}
    295
    296static unsigned long pcpu_off_to_block_index(int off)
    297{
    298	return off / PCPU_BITMAP_BLOCK_BITS;
    299}
    300
    301static unsigned long pcpu_off_to_block_off(int off)
    302{
    303	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
    304}
    305
    306static unsigned long pcpu_block_off_to_off(int index, int off)
    307{
    308	return index * PCPU_BITMAP_BLOCK_BITS + off;
    309}
    310
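       /*
        * A worked example, assuming a 4K PAGE_SIZE and the default 4-byte
        * PCPU_MIN_ALLOC_SIZE so that PCPU_BITMAP_BLOCK_BITS == 1024: bit
        * offset 2500 lives in block index 2500 / 1024 == 2 at block offset
        * 2500 & 1023 == 452, and pcpu_block_off_to_off(2, 452) recovers
        * 2 * 1024 + 452 == 2500.
        */
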
    311/**
    312 * pcpu_check_block_hint - check against the contig hint
    313 * @block: block of interest
    314 * @bits: size of allocation
    315 * @align: alignment of area (max PAGE_SIZE)
    316 *
    317 * Check to see if the allocation can fit in the block's contig hint.
    318 * Note, a chunk uses the same hints as a block so this can also check against
    319 * the chunk's contig hint.
    320 */
    321static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
    322				  size_t align)
    323{
    324	int bit_off = ALIGN(block->contig_hint_start, align) -
    325		block->contig_hint_start;
    326
    327	return bit_off + bits <= block->contig_hint;
    328}
    329
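       /*
        * For example, with illustrative numbers: contig_hint_start == 10,
        * contig_hint == 20, align == 8 and bits == 12 give an aligned start
        * of ALIGN(10, 8) == 16, so bit_off == 6 and 6 + 12 <= 20 means the
        * request may fit within the hinted area.
        */
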
    330/*
    331 * pcpu_next_hint - determine which hint to use
    332 * @block: block of interest
    333 * @alloc_bits: size of allocation
    334 *
    335 * This determines if we should scan based on the scan_hint or first_free.
    336 * In general, we want to scan from first_free to fulfill allocations by
    337 * first fit.  However, if we know a scan_hint at position scan_hint_start
    338 * cannot fulfill an allocation, we can begin scanning from there knowing
    339 * the contig_hint will be our fallback.
    340 */
    341static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
    342{
    343	/*
    344	 * The three conditions below determine if we can skip past the
    345	 * scan_hint.  First, does the scan hint exist.  Second, is the
    346	 * contig_hint after the scan_hint (this can be false only when
    347	 * contig_hint == scan_hint).  Third, is the allocation request
    348	 * larger than the scan_hint.
    349	 */
    350	if (block->scan_hint &&
    351	    block->contig_hint_start > block->scan_hint_start &&
    352	    alloc_bits > block->scan_hint)
    353		return block->scan_hint_start + block->scan_hint;
    354
    355	return block->first_free;
    356}
    357
    358/**
    359 * pcpu_next_md_free_region - finds the next hint free area
    360 * @chunk: chunk of interest
    361 * @bit_off: chunk offset
    362 * @bits: size of free area
    363 *
    364 * Helper function for pcpu_for_each_md_free_region.  It checks
    365 * block->contig_hint and performs aggregation across blocks to find the
    366 * next hint.  It modifies bit_off and bits in-place to be consumed in the
    367 * loop.
    368 */
    369static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
    370				     int *bits)
    371{
    372	int i = pcpu_off_to_block_index(*bit_off);
    373	int block_off = pcpu_off_to_block_off(*bit_off);
    374	struct pcpu_block_md *block;
    375
    376	*bits = 0;
    377	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
    378	     block++, i++) {
    379		/* handles contig area across blocks */
    380		if (*bits) {
    381			*bits += block->left_free;
    382			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
    383				continue;
    384			return;
    385		}
    386
    387		/*
    388		 * This checks three things.  First is there a contig_hint to
    389		 * check.  Second, have we checked this hint before by
    390		 * comparing the block_off.  Third, is this the same as the
    391		 * right contig hint.  In the last case, it spills over into
    392		 * the next block and should be handled by the contig area
    393		 * across blocks code.
    394		 */
    395		*bits = block->contig_hint;
    396		if (*bits && block->contig_hint_start >= block_off &&
    397		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
    398			*bit_off = pcpu_block_off_to_off(i,
    399					block->contig_hint_start);
    400			return;
    401		}
    402		/* reset to satisfy the second predicate above */
    403		block_off = 0;
    404
    405		*bits = block->right_free;
    406		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
    407	}
    408}
    409
    410/**
    411 * pcpu_next_fit_region - finds fit areas for a given allocation request
    412 * @chunk: chunk of interest
    413 * @alloc_bits: size of allocation
    414 * @align: alignment of area (max PAGE_SIZE)
    415 * @bit_off: chunk offset
    416 * @bits: size of free area
    417 *
    418 * Finds the next free region that is viable for use with a given size and
    419 * alignment.  This only returns if there is a valid area to be used for this
    420 * allocation.  block->first_free is returned if the allocation request fits
    421 * within the block to see if the request can be fulfilled prior to the contig
    422 * hint.
    423 */
    424static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
    425				 int align, int *bit_off, int *bits)
    426{
    427	int i = pcpu_off_to_block_index(*bit_off);
    428	int block_off = pcpu_off_to_block_off(*bit_off);
    429	struct pcpu_block_md *block;
    430
    431	*bits = 0;
    432	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
    433	     block++, i++) {
    434		/* handles contig area across blocks */
    435		if (*bits) {
    436			*bits += block->left_free;
    437			if (*bits >= alloc_bits)
    438				return;
    439			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
    440				continue;
    441		}
    442
    443		/* check block->contig_hint */
    444		*bits = ALIGN(block->contig_hint_start, align) -
    445			block->contig_hint_start;
    446		/*
    447		 * This uses the block offset to determine if this has been
    448		 * checked in the prior iteration.
    449		 */
    450		if (block->contig_hint &&
    451		    block->contig_hint_start >= block_off &&
    452		    block->contig_hint >= *bits + alloc_bits) {
    453			int start = pcpu_next_hint(block, alloc_bits);
    454
    455			*bits += alloc_bits + block->contig_hint_start -
    456				 start;
    457			*bit_off = pcpu_block_off_to_off(i, start);
    458			return;
    459		}
    460		/* reset to satisfy the second predicate above */
    461		block_off = 0;
    462
    463		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
    464				 align);
    465		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
    466		*bit_off = pcpu_block_off_to_off(i, *bit_off);
    467		if (*bits >= alloc_bits)
    468			return;
    469	}
    470
    471	/* no valid offsets were found - fail condition */
    472	*bit_off = pcpu_chunk_map_bits(chunk);
    473}
    474
    475/*
    476 * Metadata free area iterators.  These perform aggregation of free areas
    477 * based on the metadata blocks and return the offset @bit_off and size in
    478 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
    479 * a fit is found for the allocation request.
    480 */
    481#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
    482	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
    483	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
    484	     (bit_off) += (bits) + 1,					\
    485	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
    486
    487#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
    488	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
    489				  &(bits));				      \
    490	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
    491	     (bit_off) += (bits),					      \
    492	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
    493				  &(bits)))
    494
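       /*
        * Typical use, as in pcpu_chunk_refresh_hint() below: @bit_off is
        * seeded with the offset to start scanning from and @bits with 0,
        * and the iterator then yields each free region in turn, e.g.
        *
        *	bit_off = chunk_md->first_free;
        *	bits = 0;
        *	pcpu_for_each_md_free_region(chunk, bit_off, bits)
        *		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
        */
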
    495/**
    496 * pcpu_mem_zalloc - allocate memory
    497 * @size: bytes to allocate
    498 * @gfp: allocation flags
    499 *
    500 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
    501 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
    502 * This is to facilitate passing through whitelisted flags.  The
    503 * returned memory is always zeroed.
    504 *
    505 * RETURNS:
    506 * Pointer to the allocated area on success, NULL on failure.
    507 */
    508static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
    509{
    510	if (WARN_ON_ONCE(!slab_is_available()))
    511		return NULL;
    512
    513	if (size <= PAGE_SIZE)
    514		return kzalloc(size, gfp);
    515	else
    516		return __vmalloc(size, gfp | __GFP_ZERO);
    517}
    518
    519/**
    520 * pcpu_mem_free - free memory
    521 * @ptr: memory to free
    522 *
    523 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
    524 */
    525static void pcpu_mem_free(void *ptr)
    526{
    527	kvfree(ptr);
    528}
    529
    530static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
    531			      bool move_front)
    532{
    533	if (chunk != pcpu_reserved_chunk) {
    534		if (move_front)
    535			list_move(&chunk->list, &pcpu_chunk_lists[slot]);
    536		else
    537			list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
    538	}
    539}
    540
    541static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
    542{
    543	__pcpu_chunk_move(chunk, slot, true);
    544}
    545
    546/**
    547 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
    548 * @chunk: chunk of interest
    549 * @oslot: the previous slot it was on
    550 *
    551 * This function is called after an allocation or free changed @chunk.
    552 * New slot according to the changed state is determined and @chunk is
    553 * moved to the slot.  Note that the reserved chunk is never put on
    554 * chunk slots.
    555 *
    556 * CONTEXT:
    557 * pcpu_lock.
    558 */
    559static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
    560{
    561	int nslot = pcpu_chunk_slot(chunk);
    562
    563	/* leave isolated chunks in-place */
    564	if (chunk->isolated)
    565		return;
    566
    567	if (oslot != nslot)
    568		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
    569}
    570
    571static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
    572{
    573	lockdep_assert_held(&pcpu_lock);
    574
    575	if (!chunk->isolated) {
    576		chunk->isolated = true;
    577		pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
    578	}
    579	list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
    580}
    581
    582static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
    583{
    584	lockdep_assert_held(&pcpu_lock);
    585
    586	if (chunk->isolated) {
    587		chunk->isolated = false;
    588		pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
    589		pcpu_chunk_relocate(chunk, -1);
    590	}
    591}
    592
    593/*
    594 * pcpu_update_empty_pages - update empty page counters
    595 * @chunk: chunk of interest
    596 * @nr: nr of empty pages
    597 *
    598 * This is used to keep track of the empty pages, based on the premise that
    599 * a md_block covers a page.  The hint update functions recognize if a block
    600 * is made full or broken to calculate deltas for keeping track of free pages.
    601 */
    602static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
    603{
    604	chunk->nr_empty_pop_pages += nr;
    605	if (chunk != pcpu_reserved_chunk && !chunk->isolated)
    606		pcpu_nr_empty_pop_pages += nr;
    607}
    608
    609/*
    610 * pcpu_region_overlap - determines if two regions overlap
    611 * @a: start of first region, inclusive
    612 * @b: end of first region, exclusive
    613 * @x: start of second region, inclusive
    614 * @y: end of second region, exclusive
    615 *
    616 * This is used to determine if the hint region [a, b) overlaps with the
    617 * allocated region [x, y).
    618 */
    619static inline bool pcpu_region_overlap(int a, int b, int x, int y)
    620{
    621	return (a < y) && (x < b);
    622}
    623
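       /*
        * E.g. the hint region [4, 10) overlaps the allocation [8, 12)
        * because 4 < 12 && 8 < 10, while [4, 8) and [8, 12) do not overlap
        * (8 < 8 is false); half-open regions that merely touch don't count.
        */
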
    624/**
    625 * pcpu_block_update - updates a block given a free area
    626 * @block: block of interest
    627 * @start: start offset in block
    628 * @end: end offset in block
    629 *
    630 * Updates a block given a known free area.  The region [start, end) is
    631 * expected to be the entirety of the free area within a block.  Chooses
    632 * the best starting offset if the contig hints are equal.
    633 */
    634static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
    635{
    636	int contig = end - start;
    637
    638	block->first_free = min(block->first_free, start);
    639	if (start == 0)
    640		block->left_free = contig;
    641
    642	if (end == block->nr_bits)
    643		block->right_free = contig;
    644
    645	if (contig > block->contig_hint) {
    646		/* promote the old contig_hint to be the new scan_hint */
    647		if (start > block->contig_hint_start) {
    648			if (block->contig_hint > block->scan_hint) {
    649				block->scan_hint_start =
    650					block->contig_hint_start;
    651				block->scan_hint = block->contig_hint;
    652			} else if (start < block->scan_hint_start) {
    653				/*
    654				 * The old contig_hint == scan_hint.  But, the
    655				 * new contig is larger so hold the invariant
    656				 * scan_hint_start < contig_hint_start.
    657				 */
    658				block->scan_hint = 0;
    659			}
    660		} else {
    661			block->scan_hint = 0;
    662		}
    663		block->contig_hint_start = start;
    664		block->contig_hint = contig;
    665	} else if (contig == block->contig_hint) {
    666		if (block->contig_hint_start &&
    667		    (!start ||
    668		     __ffs(start) > __ffs(block->contig_hint_start))) {
    669			/* start has a better alignment so use it */
    670			block->contig_hint_start = start;
    671			if (start < block->scan_hint_start &&
    672			    block->contig_hint > block->scan_hint)
    673				block->scan_hint = 0;
    674		} else if (start > block->scan_hint_start ||
    675			   block->contig_hint > block->scan_hint) {
    676			/*
    677			 * Knowing contig == contig_hint, update the scan_hint
    678			 * if it is farther than or larger than the current
    679			 * scan_hint.
    680			 */
    681			block->scan_hint_start = start;
    682			block->scan_hint = contig;
    683		}
    684	} else {
    685		/*
    686		 * The region is smaller than the contig_hint.  So only update
    687		 * the scan_hint if it is larger than or equal and farther than
    688		 * the current scan_hint.
    689		 */
    690		if ((start < block->contig_hint_start &&
    691		     (contig > block->scan_hint ||
    692		      (contig == block->scan_hint &&
    693		       start > block->scan_hint_start)))) {
    694			block->scan_hint_start = start;
    695			block->scan_hint = contig;
    696		}
    697	}
    698}
    699
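       /*
        * A sketch of the hint promotion above, with illustrative numbers: a
        * block whose contig_hint is 100 bits at offset 200 (scan_hint == 0)
        * is handed a free area [400, 600).  contig == 200 beats the old
        * hint, and since 400 > 200 the old hint is demoted to the scan_hint
        * (start 200, size 100) before the contig_hint becomes the 200-bit
        * area at offset 400.
        */
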
    700/*
    701 * pcpu_block_update_scan - update a block given a free area from a scan
    702 * @chunk: chunk of interest
    703 * @bit_off: chunk offset
    704 * @bits: size of free area
    705 *
    706 * Finding the final allocation spot first goes through pcpu_find_block_fit()
    707 * to find a block that can hold the allocation and then pcpu_alloc_area()
    708 * where a scan is used.  When allocations require specific alignments,
    709 * we can inadvertently create holes which will not be seen in the alloc
    710 * or free paths.
    711 *
    712 * This takes a given free area hole and updates a block as it may change the
    713 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
    714 * from alignment.
    715 */
    716static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
    717				   int bits)
    718{
    719	int s_off = pcpu_off_to_block_off(bit_off);
    720	int e_off = s_off + bits;
    721	int s_index, l_bit;
    722	struct pcpu_block_md *block;
    723
    724	if (e_off > PCPU_BITMAP_BLOCK_BITS)
    725		return;
    726
    727	s_index = pcpu_off_to_block_index(bit_off);
    728	block = chunk->md_blocks + s_index;
    729
    730	/* scan backwards in case of alignment skipping free bits */
    731	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
    732	s_off = (s_off == l_bit) ? 0 : l_bit + 1;
    733
    734	pcpu_block_update(block, s_off, e_off);
    735}
    736
    737/**
    738 * pcpu_chunk_refresh_hint - updates metadata about a chunk
    739 * @chunk: chunk of interest
    740 * @full_scan: if we should scan from the beginning
    741 *
    742 * Iterates over the metadata blocks to find the largest contig area.
    743 * A full scan can be avoided on the allocation path as this is triggered
    744 * if we broke the contig_hint.  In doing so, the scan_hint will be before
    745 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
    746 * be prevented on freeing as we want to find the largest area possibly
    747 * spanning blocks.
    748 */
    749static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
    750{
    751	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
    752	int bit_off, bits;
    753
    754	/* promote scan_hint to contig_hint */
    755	if (!full_scan && chunk_md->scan_hint) {
    756		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
    757		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
    758		chunk_md->contig_hint = chunk_md->scan_hint;
    759		chunk_md->scan_hint = 0;
    760	} else {
    761		bit_off = chunk_md->first_free;
    762		chunk_md->contig_hint = 0;
    763	}
    764
    765	bits = 0;
    766	pcpu_for_each_md_free_region(chunk, bit_off, bits)
    767		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
    768}
    769
    770/**
    771 * pcpu_block_refresh_hint
    772 * @chunk: chunk of interest
    773 * @index: index of the metadata block
    774 *
    775 * Scans over the block beginning at first_free and updates the block
    776 * metadata accordingly.
    777 */
    778static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
    779{
    780	struct pcpu_block_md *block = chunk->md_blocks + index;
    781	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
    782	unsigned int start, end;	/* region start, region end */
    783
    784	/* promote scan_hint to contig_hint */
    785	if (block->scan_hint) {
    786		start = block->scan_hint_start + block->scan_hint;
    787		block->contig_hint_start = block->scan_hint_start;
    788		block->contig_hint = block->scan_hint;
    789		block->scan_hint = 0;
    790	} else {
    791		start = block->first_free;
    792		block->contig_hint = 0;
    793	}
    794
    795	block->right_free = 0;
    796
    797	/* iterate over free areas and update the contig hints */
    798	for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS)
    799		pcpu_block_update(block, start, end);
    800}
    801
    802/**
    803 * pcpu_block_update_hint_alloc - update hint on allocation path
    804 * @chunk: chunk of interest
    805 * @bit_off: chunk offset
    806 * @bits: size of request
    807 *
    808 * Updates metadata for the allocation path.  The metadata only has to be
    809 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
    810 * scans are required if the block's contig hint is broken.
    811 */
    812static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
    813					 int bits)
    814{
    815	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
    816	int nr_empty_pages = 0;
    817	struct pcpu_block_md *s_block, *e_block, *block;
    818	int s_index, e_index;	/* block indexes of the freed allocation */
    819	int s_off, e_off;	/* block offsets of the freed allocation */
    820
    821	/*
    822	 * Calculate per block offsets.
    823	 * The calculation uses an inclusive range, but the resulting offsets
    824	 * are [start, end).  e_index always points to the last block in the
    825	 * range.
    826	 */
    827	s_index = pcpu_off_to_block_index(bit_off);
    828	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
    829	s_off = pcpu_off_to_block_off(bit_off);
    830	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
    831
    832	s_block = chunk->md_blocks + s_index;
    833	e_block = chunk->md_blocks + e_index;
    834
    835	/*
    836	 * Update s_block.
    837	 * block->first_free must be updated if the allocation takes its place.
    838	 * If the allocation breaks the contig_hint, a scan is required to
    839	 * restore this hint.
    840	 */
    841	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
    842		nr_empty_pages++;
    843
    844	if (s_off == s_block->first_free)
    845		s_block->first_free = find_next_zero_bit(
    846					pcpu_index_alloc_map(chunk, s_index),
    847					PCPU_BITMAP_BLOCK_BITS,
    848					s_off + bits);
    849
    850	if (pcpu_region_overlap(s_block->scan_hint_start,
    851				s_block->scan_hint_start + s_block->scan_hint,
    852				s_off,
    853				s_off + bits))
    854		s_block->scan_hint = 0;
    855
    856	if (pcpu_region_overlap(s_block->contig_hint_start,
    857				s_block->contig_hint_start +
    858				s_block->contig_hint,
    859				s_off,
    860				s_off + bits)) {
    861		/* block contig hint is broken - scan to fix it */
    862		if (!s_off)
    863			s_block->left_free = 0;
    864		pcpu_block_refresh_hint(chunk, s_index);
    865	} else {
    866		/* update left and right contig manually */
    867		s_block->left_free = min(s_block->left_free, s_off);
    868		if (s_index == e_index)
    869			s_block->right_free = min_t(int, s_block->right_free,
    870					PCPU_BITMAP_BLOCK_BITS - e_off);
    871		else
    872			s_block->right_free = 0;
    873	}
    874
    875	/*
    876	 * Update e_block.
    877	 */
    878	if (s_index != e_index) {
    879		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
    880			nr_empty_pages++;
    881
    882		/*
    883		 * When the allocation is across blocks, the end is along
    884		 * the left part of the e_block.
    885		 */
    886		e_block->first_free = find_next_zero_bit(
    887				pcpu_index_alloc_map(chunk, e_index),
    888				PCPU_BITMAP_BLOCK_BITS, e_off);
    889
    890		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
    891			/* reset the block */
    892			e_block++;
    893		} else {
    894			if (e_off > e_block->scan_hint_start)
    895				e_block->scan_hint = 0;
    896
    897			e_block->left_free = 0;
    898			if (e_off > e_block->contig_hint_start) {
    899				/* contig hint is broken - scan to fix it */
    900				pcpu_block_refresh_hint(chunk, e_index);
    901			} else {
    902				e_block->right_free =
    903					min_t(int, e_block->right_free,
    904					      PCPU_BITMAP_BLOCK_BITS - e_off);
    905			}
    906		}
    907
    908		/* update in-between md_blocks */
    909		nr_empty_pages += (e_index - s_index - 1);
    910		for (block = s_block + 1; block < e_block; block++) {
    911			block->scan_hint = 0;
    912			block->contig_hint = 0;
    913			block->left_free = 0;
    914			block->right_free = 0;
    915		}
    916	}
    917
    918	if (nr_empty_pages)
    919		pcpu_update_empty_pages(chunk, -nr_empty_pages);
    920
    921	if (pcpu_region_overlap(chunk_md->scan_hint_start,
    922				chunk_md->scan_hint_start +
    923				chunk_md->scan_hint,
    924				bit_off,
    925				bit_off + bits))
    926		chunk_md->scan_hint = 0;
    927
    928	/*
    929	 * The only time a full chunk scan is required is if the chunk
    930	 * contig hint is broken.  Otherwise, it means a smaller space
    931	 * was used and therefore the chunk contig hint is still correct.
    932	 */
    933	if (pcpu_region_overlap(chunk_md->contig_hint_start,
    934				chunk_md->contig_hint_start +
    935				chunk_md->contig_hint,
    936				bit_off,
    937				bit_off + bits))
    938		pcpu_chunk_refresh_hint(chunk, false);
    939}
    940
    941/**
    942 * pcpu_block_update_hint_free - updates the block hints on the free path
    943 * @chunk: chunk of interest
    944 * @bit_off: chunk offset
    945 * @bits: size of request
    946 *
    947 * Updates metadata for the free path.  This avoids a blind block
    948 * refresh by making use of the block contig hints.  If this fails, it scans
    949 * forward and backward to determine the extent of the free area.  This is
    950 * capped at the boundary of blocks.
    951 *
    952 * A chunk update is triggered if a page becomes free, a block becomes free,
    953 * or the free spans across blocks.  This tradeoff is to minimize iterating
    954 * over the block metadata to update chunk_md->contig_hint.
    955 * chunk_md->contig_hint may be off by up to a page, but it will never be more
    956 * than the available space.  If the contig hint is contained in one block, it
    957 * will be accurate.
    958 */
    959static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
    960					int bits)
    961{
    962	int nr_empty_pages = 0;
    963	struct pcpu_block_md *s_block, *e_block, *block;
    964	int s_index, e_index;	/* block indexes of the freed allocation */
    965	int s_off, e_off;	/* block offsets of the freed allocation */
    966	int start, end;		/* start and end of the whole free area */
    967
    968	/*
    969	 * Calculate per block offsets.
    970	 * The calculation uses an inclusive range, but the resulting offsets
    971	 * are [start, end).  e_index always points to the last block in the
    972	 * range.
    973	 */
    974	s_index = pcpu_off_to_block_index(bit_off);
    975	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
    976	s_off = pcpu_off_to_block_off(bit_off);
    977	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
    978
    979	s_block = chunk->md_blocks + s_index;
    980	e_block = chunk->md_blocks + e_index;
    981
    982	/*
    983	 * Check if the freed area aligns with the block->contig_hint.
    984	 * If it does, then the scan to find the beginning/end of the
    985	 * larger free area can be avoided.
    986	 *
    987	 * start and end refer to beginning and end of the free area
    988	 * within each their respective blocks.  This is not necessarily
    989	 * the entire free area as it may span blocks past the beginning
    990	 * or end of the block.
    991	 */
    992	start = s_off;
    993	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
    994		start = s_block->contig_hint_start;
    995	} else {
    996		/*
    997		 * Scan backwards to find the extent of the free area.
    998		 * find_last_bit returns the starting bit, so if the start bit
    999		 * is returned, that means there was no last bit and the
   1000		 * remainder of the chunk is free.
   1001		 */
   1002		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
   1003					  start);
   1004		start = (start == l_bit) ? 0 : l_bit + 1;
   1005	}
   1006
   1007	end = e_off;
   1008	if (e_off == e_block->contig_hint_start)
   1009		end = e_block->contig_hint_start + e_block->contig_hint;
   1010	else
   1011		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
   1012				    PCPU_BITMAP_BLOCK_BITS, end);
   1013
   1014	/* update s_block */
   1015	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
   1016	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
   1017		nr_empty_pages++;
   1018	pcpu_block_update(s_block, start, e_off);
   1019
   1020	/* the free spans more than one block */
   1021	if (s_index != e_index) {
   1022		/* update e_block */
   1023		if (end == PCPU_BITMAP_BLOCK_BITS)
   1024			nr_empty_pages++;
   1025		pcpu_block_update(e_block, 0, end);
   1026
   1027		/* reset md_blocks in the middle */
   1028		nr_empty_pages += (e_index - s_index - 1);
   1029		for (block = s_block + 1; block < e_block; block++) {
   1030			block->first_free = 0;
   1031			block->scan_hint = 0;
   1032			block->contig_hint_start = 0;
   1033			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
   1034			block->left_free = PCPU_BITMAP_BLOCK_BITS;
   1035			block->right_free = PCPU_BITMAP_BLOCK_BITS;
   1036		}
   1037	}
   1038
   1039	if (nr_empty_pages)
   1040		pcpu_update_empty_pages(chunk, nr_empty_pages);
   1041
   1042	/*
   1043	 * Refresh chunk metadata when the free makes a block free or spans
   1044	 * across blocks.  The contig_hint may be off by up to a page, but if
   1045	 * the contig_hint is contained in a block, it will be accurate with
   1046	 * the else condition below.
   1047	 */
   1048	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
   1049		pcpu_chunk_refresh_hint(chunk, true);
   1050	else
   1051		pcpu_block_update(&chunk->chunk_md,
   1052				  pcpu_block_off_to_off(s_index, start),
   1053				  end);
   1054}
   1055
   1056/**
   1057 * pcpu_is_populated - determines if the region is populated
   1058 * @chunk: chunk of interest
   1059 * @bit_off: chunk offset
   1060 * @bits: size of area
   1061 * @next_off: return value for the next offset to start searching
   1062 *
   1063 * For atomic allocations, check if the backing pages are populated.
   1064 *
   1065 * RETURNS:
   1066 * true if the backing pages are populated, false otherwise.
   1067 * @next_off is used by pcpu_find_block_fit() to skip over unpopulated blocks.
   1068 */
   1069static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
   1070			      int *next_off)
   1071{
   1072	unsigned int start, end;
   1073
   1074	start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
   1075	end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
   1076
   1077	start = find_next_zero_bit(chunk->populated, end, start);
   1078	if (start >= end)
   1079		return true;
   1080
   1081	end = find_next_bit(chunk->populated, end, start + 1);
   1082
   1083	*next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
   1084	return false;
   1085}
   1086
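       /*
        * For example, assuming 4K pages and the default 4-byte
        * PCPU_MIN_ALLOC_SIZE: bits == 100 at bit_off == 1000 covers bytes
        * [4000, 4400), so start == PFN_DOWN(4000) == 0 and
        * end == PFN_UP(4400) == 2, i.e. pages 0 and 1 of the chunk must both
        * be populated for an atomic allocation to use this region.
        */
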
   1087/**
   1088 * pcpu_find_block_fit - finds the block index to start searching
   1089 * @chunk: chunk of interest
   1090 * @alloc_bits: size of request in allocation units
   1091 * @align: alignment of area (max PAGE_SIZE bytes)
   1092 * @pop_only: use populated regions only
   1093 *
   1094 * Given a chunk and an allocation spec, find the offset to begin searching
   1095 * for a free region.  This iterates over the bitmap metadata blocks to
   1096 * find an offset that will be guaranteed to fit the requirements.  It is
   1097 * not quite first fit as if the allocation does not fit in the contig hint
   1098 * of a block or chunk, it is skipped.  This errs on the side of caution
   1099 * to prevent excess iteration.  Poor alignment can cause the allocator to
   1100 * skip over blocks and chunks that have valid free areas.
   1101 *
   1102 * RETURNS:
   1103 * The offset in the bitmap to begin searching.
   1104 * -1 if no offset is found.
   1105 */
   1106static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
   1107			       size_t align, bool pop_only)
   1108{
   1109	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
   1110	int bit_off, bits, next_off;
   1111
   1112	/*
   1113	 * This is an optimization to prevent scanning by assuming if the
   1114	 * allocation cannot fit in the global hint, there is memory pressure
   1115	 * and creating a new chunk would happen soon.
   1116	 */
   1117	if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
   1118		return -1;
   1119
   1120	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
   1121	bits = 0;
   1122	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
   1123		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
   1124						   &next_off))
   1125			break;
   1126
   1127		bit_off = next_off;
   1128		bits = 0;
   1129	}
   1130
   1131	if (bit_off == pcpu_chunk_map_bits(chunk))
   1132		return -1;
   1133
   1134	return bit_off;
   1135}
   1136
   1137/*
   1138 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
   1139 * @map: the address to base the search on
   1140 * @size: the bitmap size in bits
   1141 * @start: the bitnumber to start searching at
   1142 * @nr: the number of zeroed bits we're looking for
   1143 * @align_mask: alignment mask for zero area
   1144 * @largest_off: offset of the largest area skipped
   1145 * @largest_bits: size of the largest area skipped
   1146 *
   1147 * The @align_mask should be one less than a power of 2.
   1148 *
   1149 * This is a modified version of bitmap_find_next_zero_area_off() to remember
   1150 * the largest area that was skipped.  This is imperfect, but in general is
   1151 * good enough.  The largest remembered region is the largest failed region
   1152 * seen.  This does not include anything we possibly skipped due to alignment.
   1153 * pcpu_block_update_scan() does scan backwards to try and recover what was
   1154 * lost to alignment.  While this can cause scanning to miss earlier possible
   1155 * free areas, smaller allocations will eventually fill those holes.
   1156 */
   1157static unsigned long pcpu_find_zero_area(unsigned long *map,
   1158					 unsigned long size,
   1159					 unsigned long start,
   1160					 unsigned long nr,
   1161					 unsigned long align_mask,
   1162					 unsigned long *largest_off,
   1163					 unsigned long *largest_bits)
   1164{
   1165	unsigned long index, end, i, area_off, area_bits;
   1166again:
   1167	index = find_next_zero_bit(map, size, start);
   1168
   1169	/* Align allocation */
   1170	index = __ALIGN_MASK(index, align_mask);
   1171	area_off = index;
   1172
   1173	end = index + nr;
   1174	if (end > size)
   1175		return end;
   1176	i = find_next_bit(map, end, index);
   1177	if (i < end) {
   1178		area_bits = i - area_off;
   1179		/* remember largest unused area with best alignment */
   1180		if (area_bits > *largest_bits ||
   1181		    (area_bits == *largest_bits && *largest_off &&
   1182		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
   1183			*largest_off = area_off;
   1184			*largest_bits = area_bits;
   1185		}
   1186
   1187		start = i + 1;
   1188		goto again;
   1189	}
   1190	return index;
   1191}
   1192
   1193/**
   1194 * pcpu_alloc_area - allocates an area from a pcpu_chunk
   1195 * @chunk: chunk of interest
   1196 * @alloc_bits: size of request in allocation units
   1197 * @align: alignment of area (max PAGE_SIZE)
   1198 * @start: bit_off to start searching
   1199 *
   1200 * This function takes in a @start offset to begin searching to fit an
   1201 * allocation of @alloc_bits with alignment @align.  It needs to scan
   1202 * the allocation map because if it fits within the block's contig hint,
   1203 * @start will be block->first_free. This is an attempt to fill the
   1204 * allocation prior to breaking the contig hint.  The allocation and
   1205 * boundary maps are updated accordingly if it confirms a valid
   1206 * free area.
   1207 *
   1208 * RETURNS:
   1209 * Allocated addr offset in @chunk on success.
   1210 * -1 if no matching area is found.
   1211 */
   1212static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
   1213			   size_t align, int start)
   1214{
   1215	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
   1216	size_t align_mask = (align) ? (align - 1) : 0;
   1217	unsigned long area_off = 0, area_bits = 0;
   1218	int bit_off, end, oslot;
   1219
   1220	lockdep_assert_held(&pcpu_lock);
   1221
   1222	oslot = pcpu_chunk_slot(chunk);
   1223
   1224	/*
   1225	 * Search to find a fit.
   1226	 */
   1227	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
   1228		    pcpu_chunk_map_bits(chunk));
   1229	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
   1230				      align_mask, &area_off, &area_bits);
   1231	if (bit_off >= end)
   1232		return -1;
   1233
   1234	if (area_bits)
   1235		pcpu_block_update_scan(chunk, area_off, area_bits);
   1236
   1237	/* update alloc map */
   1238	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
   1239
   1240	/* update boundary map */
   1241	set_bit(bit_off, chunk->bound_map);
   1242	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
   1243	set_bit(bit_off + alloc_bits, chunk->bound_map);
   1244
   1245	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
   1246
   1247	/* update first free bit */
   1248	if (bit_off == chunk_md->first_free)
   1249		chunk_md->first_free = find_next_zero_bit(
   1250					chunk->alloc_map,
   1251					pcpu_chunk_map_bits(chunk),
   1252					bit_off + alloc_bits);
   1253
   1254	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
   1255
   1256	pcpu_chunk_relocate(chunk, oslot);
   1257
   1258	return bit_off * PCPU_MIN_ALLOC_SIZE;
   1259}
   1260
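       /*
        * A sketch of the bookkeeping above, with illustrative numbers: an
        * allocation of alloc_bits == 3 placed at bit_off == 8 sets
        * alloc_map bits 8..10 and, in the boundary map, sets bits 8 and 11
        * while clearing bits 9..10.  pcpu_free_area() below then recovers
        * the size from off == 8 * PCPU_MIN_ALLOC_SIZE alone: the next
        * bound_map bit after 8 is 11, so bits == 3 are cleared and
        * 3 * PCPU_MIN_ALLOC_SIZE bytes are reported as freed.
        */
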
   1261/**
   1262 * pcpu_free_area - frees the corresponding offset
   1263 * @chunk: chunk of interest
   1264 * @off: addr offset into chunk
   1265 *
   1266 * This function determines the size of an allocation to free using
   1267 * the boundary bitmap and clears the allocation map.
   1268 *
   1269 * RETURNS:
   1270 * Number of freed bytes.
   1271 */
   1272static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
   1273{
   1274	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
   1275	int bit_off, bits, end, oslot, freed;
   1276
   1277	lockdep_assert_held(&pcpu_lock);
   1278	pcpu_stats_area_dealloc(chunk);
   1279
   1280	oslot = pcpu_chunk_slot(chunk);
   1281
   1282	bit_off = off / PCPU_MIN_ALLOC_SIZE;
   1283
   1284	/* find end index */
   1285	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
   1286			    bit_off + 1);
   1287	bits = end - bit_off;
   1288	bitmap_clear(chunk->alloc_map, bit_off, bits);
   1289
   1290	freed = bits * PCPU_MIN_ALLOC_SIZE;
   1291
   1292	/* update metadata */
   1293	chunk->free_bytes += freed;
   1294
   1295	/* update first free bit */
   1296	chunk_md->first_free = min(chunk_md->first_free, bit_off);
   1297
   1298	pcpu_block_update_hint_free(chunk, bit_off, bits);
   1299
   1300	pcpu_chunk_relocate(chunk, oslot);
   1301
   1302	return freed;
   1303}
   1304
   1305static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
   1306{
   1307	block->scan_hint = 0;
   1308	block->contig_hint = nr_bits;
   1309	block->left_free = nr_bits;
   1310	block->right_free = nr_bits;
   1311	block->first_free = 0;
   1312	block->nr_bits = nr_bits;
   1313}
   1314
   1315static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
   1316{
   1317	struct pcpu_block_md *md_block;
   1318
   1319	/* init the chunk's block */
   1320	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
   1321
   1322	for (md_block = chunk->md_blocks;
   1323	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
   1324	     md_block++)
   1325		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
   1326}
   1327
   1328/**
   1329 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
   1330 * @tmp_addr: the start of the region served
   1331 * @map_size: size of the region served
   1332 *
   1333 * This is responsible for creating the chunks that serve the first chunk.  The
   1334 * base_addr is page aligned down of @tmp_addr while the region end is page
   1335 * aligned up.  Offsets are kept track of to determine the region served. All
   1336 * this is done to appease the bitmap allocator in avoiding partial blocks.
   1337 *
   1338 * RETURNS:
   1339 * Chunk serving the region at @tmp_addr of @map_size.
   1340 */
   1341static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
   1342							 int map_size)
   1343{
   1344	struct pcpu_chunk *chunk;
   1345	unsigned long aligned_addr, lcm_align;
   1346	int start_offset, offset_bits, region_size, region_bits;
   1347	size_t alloc_size;
   1348
   1349	/* region calculations */
   1350	aligned_addr = tmp_addr & PAGE_MASK;
   1351
   1352	start_offset = tmp_addr - aligned_addr;
   1353
   1354	/*
   1355	 * Align the end of the region with the LCM of PAGE_SIZE and
   1356	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
   1357	 * the other.
   1358	 */
   1359	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
   1360	region_size = ALIGN(start_offset + map_size, lcm_align);
   1361
   1362	/* allocate chunk */
   1363	alloc_size = struct_size(chunk, populated,
   1364				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
   1365	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
   1366	if (!chunk)
   1367		panic("%s: Failed to allocate %zu bytes\n", __func__,
   1368		      alloc_size);
   1369
   1370	INIT_LIST_HEAD(&chunk->list);
   1371
   1372	chunk->base_addr = (void *)aligned_addr;
   1373	chunk->start_offset = start_offset;
   1374	chunk->end_offset = region_size - chunk->start_offset - map_size;
   1375
   1376	chunk->nr_pages = region_size >> PAGE_SHIFT;
   1377	region_bits = pcpu_chunk_map_bits(chunk);
   1378
   1379	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
   1380	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
   1381	if (!chunk->alloc_map)
   1382		panic("%s: Failed to allocate %zu bytes\n", __func__,
   1383		      alloc_size);
   1384
   1385	alloc_size =
   1386		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
   1387	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
   1388	if (!chunk->bound_map)
   1389		panic("%s: Failed to allocate %zu bytes\n", __func__,
   1390		      alloc_size);
   1391
   1392	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
   1393	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
   1394	if (!chunk->md_blocks)
   1395		panic("%s: Failed to allocate %zu bytes\n", __func__,
   1396		      alloc_size);
   1397
   1398#ifdef CONFIG_MEMCG_KMEM
   1399	/* first chunk is free to use */
   1400	chunk->obj_cgroups = NULL;
   1401#endif
   1402	pcpu_init_md_blocks(chunk);
   1403
   1404	/* manage populated page bitmap */
   1405	chunk->immutable = true;
   1406	bitmap_fill(chunk->populated, chunk->nr_pages);
   1407	chunk->nr_populated = chunk->nr_pages;
   1408	chunk->nr_empty_pop_pages = chunk->nr_pages;
   1409
   1410	chunk->free_bytes = map_size;
   1411
   1412	if (chunk->start_offset) {
   1413		/* hide the beginning of the bitmap */
   1414		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
   1415		bitmap_set(chunk->alloc_map, 0, offset_bits);
   1416		set_bit(0, chunk->bound_map);
   1417		set_bit(offset_bits, chunk->bound_map);
   1418
   1419		chunk->chunk_md.first_free = offset_bits;
   1420
   1421		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
   1422	}
   1423
   1424	if (chunk->end_offset) {
   1425		/* hide the end of the bitmap */
   1426		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
   1427		bitmap_set(chunk->alloc_map,
   1428			   pcpu_chunk_map_bits(chunk) - offset_bits,
   1429			   offset_bits);
   1430		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
   1431			chunk->bound_map);
   1432		set_bit(region_bits, chunk->bound_map);
   1433
   1434		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
   1435					     - offset_bits, offset_bits);
   1436	}
   1437
   1438	return chunk;
   1439}
   1440
   1441static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
   1442{
   1443	struct pcpu_chunk *chunk;
   1444	int region_bits;
   1445
   1446	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
   1447	if (!chunk)
   1448		return NULL;
   1449
   1450	INIT_LIST_HEAD(&chunk->list);
   1451	chunk->nr_pages = pcpu_unit_pages;
   1452	region_bits = pcpu_chunk_map_bits(chunk);
   1453
   1454	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
   1455					   sizeof(chunk->alloc_map[0]), gfp);
   1456	if (!chunk->alloc_map)
   1457		goto alloc_map_fail;
   1458
   1459	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
   1460					   sizeof(chunk->bound_map[0]), gfp);
   1461	if (!chunk->bound_map)
   1462		goto bound_map_fail;
   1463
   1464	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
   1465					   sizeof(chunk->md_blocks[0]), gfp);
   1466	if (!chunk->md_blocks)
   1467		goto md_blocks_fail;
   1468
   1469#ifdef CONFIG_MEMCG_KMEM
   1470	if (!mem_cgroup_kmem_disabled()) {
   1471		chunk->obj_cgroups =
   1472			pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
   1473					sizeof(struct obj_cgroup *), gfp);
   1474		if (!chunk->obj_cgroups)
   1475			goto objcg_fail;
   1476	}
   1477#endif
   1478
   1479	pcpu_init_md_blocks(chunk);
   1480
   1481	/* init metadata */
   1482	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
   1483
   1484	return chunk;
   1485
   1486#ifdef CONFIG_MEMCG_KMEM
   1487objcg_fail:
   1488	pcpu_mem_free(chunk->md_blocks);
   1489#endif
   1490md_blocks_fail:
   1491	pcpu_mem_free(chunk->bound_map);
   1492bound_map_fail:
   1493	pcpu_mem_free(chunk->alloc_map);
   1494alloc_map_fail:
   1495	pcpu_mem_free(chunk);
   1496
   1497	return NULL;
   1498}
   1499
   1500static void pcpu_free_chunk(struct pcpu_chunk *chunk)
   1501{
   1502	if (!chunk)
   1503		return;
   1504#ifdef CONFIG_MEMCG_KMEM
   1505	pcpu_mem_free(chunk->obj_cgroups);
   1506#endif
   1507	pcpu_mem_free(chunk->md_blocks);
   1508	pcpu_mem_free(chunk->bound_map);
   1509	pcpu_mem_free(chunk->alloc_map);
   1510	pcpu_mem_free(chunk);
   1511}
   1512
   1513/**
   1514 * pcpu_chunk_populated - post-population bookkeeping
   1515 * @chunk: pcpu_chunk which got populated
   1516 * @page_start: the start page
   1517 * @page_end: the end page
   1518 *
   1519 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
   1520 * the bookkeeping information accordingly.  Must be called after each
   1521 * successful population.
   1522 */
   1523static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
   1524				 int page_end)
   1525{
   1526	int nr = page_end - page_start;
   1527
   1528	lockdep_assert_held(&pcpu_lock);
   1529
   1530	bitmap_set(chunk->populated, page_start, nr);
   1531	chunk->nr_populated += nr;
   1532	pcpu_nr_populated += nr;
   1533
   1534	pcpu_update_empty_pages(chunk, nr);
   1535}
   1536
   1537/**
   1538 * pcpu_chunk_depopulated - post-depopulation bookkeeping
   1539 * @chunk: pcpu_chunk which got depopulated
   1540 * @page_start: the start page
   1541 * @page_end: the end page
   1542 *
   1543 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
   1544 * Update the bookkeeping information accordingly.  Must be called after
   1545 * each successful depopulation.
   1546 */
   1547static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
   1548				   int page_start, int page_end)
   1549{
   1550	int nr = page_end - page_start;
   1551
   1552	lockdep_assert_held(&pcpu_lock);
   1553
   1554	bitmap_clear(chunk->populated, page_start, nr);
   1555	chunk->nr_populated -= nr;
   1556	pcpu_nr_populated -= nr;
   1557
   1558	pcpu_update_empty_pages(chunk, -nr);
   1559}
   1560
   1561/*
   1562 * Chunk management implementation.
   1563 *
   1564 * To allow different implementations, chunk alloc/free and
   1565 * [de]population are implemented in a separate file which is pulled
   1566 * into this file and compiled together.  The following functions
   1567 * should be implemented.
   1568 *
   1569 * pcpu_populate_chunk		- populate the specified range of a chunk
   1570 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
   1571 * pcpu_post_unmap_tlb_flush	- flush tlb for the specified range of a chunk
   1572 * pcpu_create_chunk		- create a new chunk
   1573 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
    1574 * pcpu_addr_to_page		- translate address to the backing page
   1575 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
   1576 */
   1577static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
   1578			       int page_start, int page_end, gfp_t gfp);
   1579static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
   1580				  int page_start, int page_end);
   1581static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
   1582				      int page_start, int page_end);
   1583static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
   1584static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
   1585static struct page *pcpu_addr_to_page(void *addr);
   1586static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
   1587
   1588#ifdef CONFIG_NEED_PER_CPU_KM
   1589#include "percpu-km.c"
   1590#else
   1591#include "percpu-vm.c"
   1592#endif
   1593
   1594/**
   1595 * pcpu_chunk_addr_search - determine chunk containing specified address
   1596 * @addr: address for which the chunk needs to be determined.
   1597 *
   1598 * This is an internal function that handles all but static allocations.
   1599 * Static percpu address values should never be passed into the allocator.
   1600 *
   1601 * RETURNS:
   1602 * The address of the found chunk.
   1603 */
   1604static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
   1605{
   1606	/* is it in the dynamic region (first chunk)? */
   1607	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
   1608		return pcpu_first_chunk;
   1609
   1610	/* is it in the reserved region? */
   1611	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
   1612		return pcpu_reserved_chunk;
   1613
   1614	/*
   1615	 * The address is relative to unit0 which might be unused and
   1616	 * thus unmapped.  Offset the address to the unit space of the
   1617	 * current processor before looking it up in the vmalloc
   1618	 * space.  Note that any possible cpu id can be used here, so
   1619	 * there's no need to worry about preemption or cpu hotplug.
   1620	 */
   1621	addr += pcpu_unit_offsets[raw_smp_processor_id()];
   1622	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
   1623}
   1624
   1625#ifdef CONFIG_MEMCG_KMEM
   1626static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
   1627				      struct obj_cgroup **objcgp)
   1628{
   1629	struct obj_cgroup *objcg;
   1630
   1631	if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
   1632		return true;
   1633
   1634	objcg = get_obj_cgroup_from_current();
   1635	if (!objcg)
   1636		return true;
   1637
   1638	if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size))) {
   1639		obj_cgroup_put(objcg);
   1640		return false;
   1641	}
   1642
   1643	*objcgp = objcg;
   1644	return true;
   1645}
   1646
   1647static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
   1648				       struct pcpu_chunk *chunk, int off,
   1649				       size_t size)
   1650{
   1651	if (!objcg)
   1652		return;
   1653
   1654	if (likely(chunk && chunk->obj_cgroups)) {
   1655		chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
   1656
   1657		rcu_read_lock();
   1658		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
   1659				pcpu_obj_full_size(size));
   1660		rcu_read_unlock();
   1661	} else {
   1662		obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
   1663		obj_cgroup_put(objcg);
   1664	}
   1665}
   1666
   1667static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
   1668{
   1669	struct obj_cgroup *objcg;
   1670
   1671	if (unlikely(!chunk->obj_cgroups))
   1672		return;
   1673
   1674	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
   1675	if (!objcg)
   1676		return;
   1677	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
   1678
   1679	obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
   1680
   1681	rcu_read_lock();
   1682	mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
   1683			-pcpu_obj_full_size(size));
   1684	rcu_read_unlock();
   1685
   1686	obj_cgroup_put(objcg);
   1687}
   1688
   1689#else /* CONFIG_MEMCG_KMEM */
   1690static bool
   1691pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
   1692{
   1693	return true;
   1694}
   1695
   1696static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
   1697				       struct pcpu_chunk *chunk, int off,
   1698				       size_t size)
   1699{
   1700}
   1701
   1702static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
   1703{
   1704}
   1705#endif /* CONFIG_MEMCG_KMEM */
   1706
   1707/**
   1708 * pcpu_alloc - the percpu allocator
   1709 * @size: size of area to allocate in bytes
   1710 * @align: alignment of area (max PAGE_SIZE)
   1711 * @reserved: allocate from the reserved chunk if available
   1712 * @gfp: allocation flags
   1713 *
   1714 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
   1715 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
   1716 * then no warning will be triggered on invalid or failed allocation
   1717 * requests.
   1718 *
   1719 * RETURNS:
   1720 * Percpu pointer to the allocated area on success, NULL on failure.
   1721 */
   1722static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
   1723				 gfp_t gfp)
   1724{
   1725	gfp_t pcpu_gfp;
   1726	bool is_atomic;
   1727	bool do_warn;
   1728	struct obj_cgroup *objcg = NULL;
   1729	static int warn_limit = 10;
   1730	struct pcpu_chunk *chunk, *next;
   1731	const char *err;
   1732	int slot, off, cpu, ret;
   1733	unsigned long flags;
   1734	void __percpu *ptr;
   1735	size_t bits, bit_align;
   1736
   1737	gfp = current_gfp_context(gfp);
   1738	/* whitelisted flags that can be passed to the backing allocators */
   1739	pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
   1740	is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
   1741	do_warn = !(gfp & __GFP_NOWARN);
   1742
   1743	/*
   1744	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
   1745	 * therefore alignment must be a minimum of that many bytes.
   1746	 * An allocation may have internal fragmentation from rounding up
   1747	 * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes.
   1748	 */
   1749	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
   1750		align = PCPU_MIN_ALLOC_SIZE;
   1751
   1752	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
   1753	bits = size >> PCPU_MIN_ALLOC_SHIFT;
   1754	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
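	/*
	 * Worked example (assuming PCPU_MIN_ALLOC_SIZE == 4): a request for
	 * 13 bytes with 8-byte alignment is rounded up to size == 16, so
	 * bits == 4 and bit_align == 2; all bitmap bookkeeping below is done
	 * at this PCPU_MIN_ALLOC_SIZE granularity.
	 */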
   1755
   1756	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
   1757		     !is_power_of_2(align))) {
   1758		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
   1759		     size, align);
   1760		return NULL;
   1761	}
   1762
   1763	if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg)))
   1764		return NULL;
   1765
   1766	if (!is_atomic) {
   1767		/*
   1768		 * pcpu_balance_workfn() allocates memory under this mutex,
   1769		 * and it may wait for memory reclaim. Allow current task
   1770		 * to become OOM victim, in case of memory pressure.
   1771		 */
   1772		if (gfp & __GFP_NOFAIL) {
   1773			mutex_lock(&pcpu_alloc_mutex);
   1774		} else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
   1775			pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
   1776			return NULL;
   1777		}
   1778	}
   1779
   1780	spin_lock_irqsave(&pcpu_lock, flags);
   1781
   1782	/* serve reserved allocations from the reserved chunk if available */
   1783	if (reserved && pcpu_reserved_chunk) {
   1784		chunk = pcpu_reserved_chunk;
   1785
   1786		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
   1787		if (off < 0) {
   1788			err = "alloc from reserved chunk failed";
   1789			goto fail_unlock;
   1790		}
   1791
   1792		off = pcpu_alloc_area(chunk, bits, bit_align, off);
   1793		if (off >= 0)
   1794			goto area_found;
   1795
   1796		err = "alloc from reserved chunk failed";
   1797		goto fail_unlock;
   1798	}
   1799
   1800restart:
   1801	/* search through normal chunks */
   1802	for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) {
   1803		list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
   1804					 list) {
   1805			off = pcpu_find_block_fit(chunk, bits, bit_align,
   1806						  is_atomic);
   1807			if (off < 0) {
   1808				if (slot < PCPU_SLOT_FAIL_THRESHOLD)
   1809					pcpu_chunk_move(chunk, 0);
   1810				continue;
   1811			}
   1812
   1813			off = pcpu_alloc_area(chunk, bits, bit_align, off);
   1814			if (off >= 0) {
   1815				pcpu_reintegrate_chunk(chunk);
   1816				goto area_found;
   1817			}
   1818		}
   1819	}
   1820
   1821	spin_unlock_irqrestore(&pcpu_lock, flags);
   1822
   1823	/*
   1824	 * No space left.  Create a new chunk.  We don't want multiple
   1825	 * tasks to create chunks simultaneously.  Serialize and create iff
   1826	 * there's still no empty chunk after grabbing the mutex.
   1827	 */
   1828	if (is_atomic) {
   1829		err = "atomic alloc failed, no space left";
   1830		goto fail;
   1831	}
   1832
   1833	if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) {
   1834		chunk = pcpu_create_chunk(pcpu_gfp);
   1835		if (!chunk) {
   1836			err = "failed to allocate new chunk";
   1837			goto fail;
   1838		}
   1839
   1840		spin_lock_irqsave(&pcpu_lock, flags);
   1841		pcpu_chunk_relocate(chunk, -1);
   1842	} else {
   1843		spin_lock_irqsave(&pcpu_lock, flags);
   1844	}
   1845
   1846	goto restart;
   1847
   1848area_found:
   1849	pcpu_stats_area_alloc(chunk, size);
   1850	spin_unlock_irqrestore(&pcpu_lock, flags);
   1851
   1852	/* populate if not all pages are already there */
   1853	if (!is_atomic) {
   1854		unsigned int page_end, rs, re;
   1855
   1856		rs = PFN_DOWN(off);
   1857		page_end = PFN_UP(off + size);
   1858
   1859		for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) {
   1860			WARN_ON(chunk->immutable);
   1861
   1862			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
   1863
   1864			spin_lock_irqsave(&pcpu_lock, flags);
   1865			if (ret) {
   1866				pcpu_free_area(chunk, off);
   1867				err = "failed to populate";
   1868				goto fail_unlock;
   1869			}
   1870			pcpu_chunk_populated(chunk, rs, re);
   1871			spin_unlock_irqrestore(&pcpu_lock, flags);
   1872		}
   1873
   1874		mutex_unlock(&pcpu_alloc_mutex);
   1875	}
   1876
   1877	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
   1878		pcpu_schedule_balance_work();
   1879
   1880	/* clear the areas and return address relative to base address */
   1881	for_each_possible_cpu(cpu)
   1882		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
   1883
   1884	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
   1885	kmemleak_alloc_percpu(ptr, size, gfp);
   1886
   1887	trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align,
   1888				  chunk->base_addr, off, ptr,
   1889				  pcpu_obj_full_size(size), gfp);
   1890
   1891	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
   1892
   1893	return ptr;
   1894
   1895fail_unlock:
   1896	spin_unlock_irqrestore(&pcpu_lock, flags);
   1897fail:
   1898	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
   1899
   1900	if (!is_atomic && do_warn && warn_limit) {
   1901		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
   1902			size, align, is_atomic, err);
   1903		dump_stack();
   1904		if (!--warn_limit)
   1905			pr_info("limit reached, disable warning\n");
   1906	}
   1907	if (is_atomic) {
   1908		/* see the flag handling in pcpu_balance_workfn() */
   1909		pcpu_atomic_alloc_failed = true;
   1910		pcpu_schedule_balance_work();
   1911	} else {
   1912		mutex_unlock(&pcpu_alloc_mutex);
   1913	}
   1914
   1915	pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
   1916
   1917	return NULL;
   1918}
   1919
   1920/**
   1921 * __alloc_percpu_gfp - allocate dynamic percpu area
   1922 * @size: size of area to allocate in bytes
   1923 * @align: alignment of area (max PAGE_SIZE)
   1924 * @gfp: allocation flags
   1925 *
   1926 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
   1927 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
   1928 * be called from any context but is a lot more likely to fail. If @gfp
   1929 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
   1930 * allocation requests.
   1931 *
   1932 * RETURNS:
   1933 * Percpu pointer to the allocated area on success, NULL on failure.
   1934 */
   1935void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
   1936{
   1937	return pcpu_alloc(size, align, false, gfp);
   1938}
   1939EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
   1940
   1941/**
   1942 * __alloc_percpu - allocate dynamic percpu area
   1943 * @size: size of area to allocate in bytes
   1944 * @align: alignment of area (max PAGE_SIZE)
   1945 *
   1946 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
   1947 */
   1948void __percpu *__alloc_percpu(size_t size, size_t align)
   1949{
   1950	return pcpu_alloc(size, align, false, GFP_KERNEL);
   1951}
   1952EXPORT_SYMBOL_GPL(__alloc_percpu);
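/*
 * Usage sketch (illustrative, not part of this file): a typical caller
 * allocates a zeroed per-CPU object, touches the local copy and frees all
 * units with free_percpu(), e.g.
 *
 *	unsigned long __percpu *cnt;
 *
 *	cnt = __alloc_percpu(sizeof(*cnt), __alignof__(*cnt));
 *	if (cnt) {
 *		this_cpu_inc(*cnt);	// increment this cpu's copy
 *		free_percpu(cnt);	// return every unit's area
 *	}
 */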
   1953
   1954/**
   1955 * __alloc_reserved_percpu - allocate reserved percpu area
   1956 * @size: size of area to allocate in bytes
   1957 * @align: alignment of area (max PAGE_SIZE)
   1958 *
   1959 * Allocate zero-filled percpu area of @size bytes aligned at @align
   1960 * from reserved percpu area if arch has set it up; otherwise,
   1961 * allocation is served from the same dynamic area.  Might sleep.
   1962 * Might trigger writeouts.
   1963 *
   1964 * CONTEXT:
   1965 * Does GFP_KERNEL allocation.
   1966 *
   1967 * RETURNS:
   1968 * Percpu pointer to the allocated area on success, NULL on failure.
   1969 */
   1970void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
   1971{
   1972	return pcpu_alloc(size, align, true, GFP_KERNEL);
   1973}
   1974
   1975/**
   1976 * pcpu_balance_free - manage the amount of free chunks
   1977 * @empty_only: free chunks only if there are no populated pages
   1978 *
   1979 * If empty_only is %false, reclaim all fully free chunks regardless of the
   1980 * number of populated pages.  Otherwise, only reclaim chunks that have no
   1981 * populated pages.
   1982 *
   1983 * CONTEXT:
   1984 * pcpu_lock (can be dropped temporarily)
   1985 */
   1986static void pcpu_balance_free(bool empty_only)
   1987{
   1988	LIST_HEAD(to_free);
   1989	struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
   1990	struct pcpu_chunk *chunk, *next;
   1991
   1992	lockdep_assert_held(&pcpu_lock);
   1993
   1994	/*
   1995	 * There's no reason to keep around multiple unused chunks and VM
   1996	 * areas can be scarce.  Destroy all free chunks except for one.
   1997	 */
   1998	list_for_each_entry_safe(chunk, next, free_head, list) {
   1999		WARN_ON(chunk->immutable);
   2000
   2001		/* spare the first one */
   2002		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
   2003			continue;
   2004
   2005		if (!empty_only || chunk->nr_empty_pop_pages == 0)
   2006			list_move(&chunk->list, &to_free);
   2007	}
   2008
   2009	if (list_empty(&to_free))
   2010		return;
   2011
   2012	spin_unlock_irq(&pcpu_lock);
   2013	list_for_each_entry_safe(chunk, next, &to_free, list) {
   2014		unsigned int rs, re;
   2015
   2016		for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
   2017			pcpu_depopulate_chunk(chunk, rs, re);
   2018			spin_lock_irq(&pcpu_lock);
   2019			pcpu_chunk_depopulated(chunk, rs, re);
   2020			spin_unlock_irq(&pcpu_lock);
   2021		}
   2022		pcpu_destroy_chunk(chunk);
   2023		cond_resched();
   2024	}
   2025	spin_lock_irq(&pcpu_lock);
   2026}
   2027
   2028/**
   2029 * pcpu_balance_populated - manage the amount of populated pages
   2030 *
   2031 * Maintain a certain amount of populated pages to satisfy atomic allocations.
   2032 * It is possible that this is called when physical memory is scarce causing
   2033 * OOM killer to be triggered.  We should avoid doing so until an actual
   2034 * allocation causes the failure as it is possible that requests can be
   2035 * serviced from already backed regions.
   2036 *
   2037 * CONTEXT:
   2038 * pcpu_lock (can be dropped temporarily)
   2039 */
   2040static void pcpu_balance_populated(void)
   2041{
   2042	/* gfp flags passed to underlying allocators */
   2043	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
   2044	struct pcpu_chunk *chunk;
   2045	int slot, nr_to_pop, ret;
   2046
   2047	lockdep_assert_held(&pcpu_lock);
   2048
   2049	/*
    2050	 * Ensure there is a certain number of free populated pages for
    2051	 * atomic allocs.  Fill up from the most packed so that atomic
    2052	 * allocs don't increase fragmentation.  If atomic allocation
    2053	 * failed previously, always populate the maximum amount.  This
    2054	 * should prevent atomic allocs larger than PAGE_SIZE from failing
    2055	 * indefinitely; however, large atomic allocs are not
   2056	 * something we support properly and can be highly unreliable and
   2057	 * inefficient.
   2058	 */
   2059retry_pop:
   2060	if (pcpu_atomic_alloc_failed) {
   2061		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
   2062		/* best effort anyway, don't worry about synchronization */
   2063		pcpu_atomic_alloc_failed = false;
   2064	} else {
   2065		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
   2066				  pcpu_nr_empty_pop_pages,
   2067				  0, PCPU_EMPTY_POP_PAGES_HIGH);
   2068	}
   2069
   2070	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
   2071		unsigned int nr_unpop = 0, rs, re;
   2072
   2073		if (!nr_to_pop)
   2074			break;
   2075
   2076		list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
   2077			nr_unpop = chunk->nr_pages - chunk->nr_populated;
   2078			if (nr_unpop)
   2079				break;
   2080		}
   2081
   2082		if (!nr_unpop)
   2083			continue;
   2084
   2085		/* @chunk can't go away while pcpu_alloc_mutex is held */
   2086		for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
   2087			int nr = min_t(int, re - rs, nr_to_pop);
   2088
   2089			spin_unlock_irq(&pcpu_lock);
   2090			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
   2091			cond_resched();
   2092			spin_lock_irq(&pcpu_lock);
   2093			if (!ret) {
   2094				nr_to_pop -= nr;
   2095				pcpu_chunk_populated(chunk, rs, rs + nr);
   2096			} else {
   2097				nr_to_pop = 0;
   2098			}
   2099
   2100			if (!nr_to_pop)
   2101				break;
   2102		}
   2103	}
   2104
   2105	if (nr_to_pop) {
   2106		/* ran out of chunks to populate, create a new one and retry */
   2107		spin_unlock_irq(&pcpu_lock);
   2108		chunk = pcpu_create_chunk(gfp);
   2109		cond_resched();
   2110		spin_lock_irq(&pcpu_lock);
   2111		if (chunk) {
   2112			pcpu_chunk_relocate(chunk, -1);
   2113			goto retry_pop;
   2114		}
   2115	}
   2116}
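/*
 * Illustrative numbers (assuming PCPU_EMPTY_POP_PAGES_HIGH == 4): if only
 * one empty populated page remains and no atomic allocation has failed,
 * the clamp above yields nr_to_pop == 3; a prior atomic failure instead
 * forces the full PCPU_EMPTY_POP_PAGES_HIGH to be populated.
 */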
   2117
   2118/**
   2119 * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
   2120 *
   2121 * Scan over chunks in the depopulate list and try to release unused populated
   2122 * pages back to the system.  Depopulated chunks are sidelined to prevent
   2123 * repopulating these pages unless required.  Fully free chunks are reintegrated
   2124 * and freed accordingly (1 is kept around).  If we drop below the empty
   2125 * populated pages threshold, reintegrate the chunk if it has empty free pages.
   2126 * Each chunk is scanned in the reverse order to keep populated pages close to
   2127 * the beginning of the chunk.
   2128 *
   2129 * CONTEXT:
   2130 * pcpu_lock (can be dropped temporarily)
   2131 *
   2132 */
   2133static void pcpu_reclaim_populated(void)
   2134{
   2135	struct pcpu_chunk *chunk;
   2136	struct pcpu_block_md *block;
   2137	int freed_page_start, freed_page_end;
   2138	int i, end;
   2139	bool reintegrate;
   2140
   2141	lockdep_assert_held(&pcpu_lock);
   2142
   2143	/*
   2144	 * Once a chunk is isolated to the to_depopulate list, the chunk is no
    2145	 * longer discoverable to allocations which may populate pages.  The only
    2146	 * other accessor is the free path, which only returns the area back to the
    2147	 * allocator without touching the populated bitmap.
   2148	 */
   2149	while (!list_empty(&pcpu_chunk_lists[pcpu_to_depopulate_slot])) {
   2150		chunk = list_first_entry(&pcpu_chunk_lists[pcpu_to_depopulate_slot],
   2151					 struct pcpu_chunk, list);
   2152		WARN_ON(chunk->immutable);
   2153
   2154		/*
   2155		 * Scan chunk's pages in the reverse order to keep populated
   2156		 * pages close to the beginning of the chunk.
   2157		 */
   2158		freed_page_start = chunk->nr_pages;
   2159		freed_page_end = 0;
   2160		reintegrate = false;
   2161		for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
   2162			/* no more work to do */
   2163			if (chunk->nr_empty_pop_pages == 0)
   2164				break;
   2165
   2166			/* reintegrate chunk to prevent atomic alloc failures */
   2167			if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
   2168				reintegrate = true;
   2169				goto end_chunk;
   2170			}
   2171
   2172			/*
   2173			 * If the page is empty and populated, start or
   2174			 * extend the (i, end) range.  If i == 0, decrease
   2175			 * i and perform the depopulation to cover the last
   2176			 * (first) page in the chunk.
   2177			 */
   2178			block = chunk->md_blocks + i;
   2179			if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
   2180			    test_bit(i, chunk->populated)) {
   2181				if (end == -1)
   2182					end = i;
   2183				if (i > 0)
   2184					continue;
   2185				i--;
   2186			}
   2187
   2188			/* depopulate if there is an active range */
   2189			if (end == -1)
   2190				continue;
   2191
   2192			spin_unlock_irq(&pcpu_lock);
   2193			pcpu_depopulate_chunk(chunk, i + 1, end + 1);
   2194			cond_resched();
   2195			spin_lock_irq(&pcpu_lock);
   2196
   2197			pcpu_chunk_depopulated(chunk, i + 1, end + 1);
   2198			freed_page_start = min(freed_page_start, i + 1);
   2199			freed_page_end = max(freed_page_end, end + 1);
   2200
   2201			/* reset the range and continue */
   2202			end = -1;
   2203		}
   2204
   2205end_chunk:
   2206		/* batch tlb flush per chunk to amortize cost */
   2207		if (freed_page_start < freed_page_end) {
   2208			spin_unlock_irq(&pcpu_lock);
   2209			pcpu_post_unmap_tlb_flush(chunk,
   2210						  freed_page_start,
   2211						  freed_page_end);
   2212			cond_resched();
   2213			spin_lock_irq(&pcpu_lock);
   2214		}
   2215
   2216		if (reintegrate || chunk->free_bytes == pcpu_unit_size)
   2217			pcpu_reintegrate_chunk(chunk);
   2218		else
   2219			list_move_tail(&chunk->list,
   2220				       &pcpu_chunk_lists[pcpu_sidelined_slot]);
   2221	}
   2222}
   2223
   2224/**
   2225 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
   2226 * @work: unused
   2227 *
   2228 * For each chunk type, manage the number of fully free chunks and the number of
   2229 * populated pages.  An important thing to consider is when pages are freed and
   2230 * how they contribute to the global counts.
   2231 */
   2232static void pcpu_balance_workfn(struct work_struct *work)
   2233{
   2234	/*
   2235	 * pcpu_balance_free() is called twice because the first time we may
   2236	 * trim pages in the active pcpu_nr_empty_pop_pages which may cause us
   2237	 * to grow other chunks.  This then gives pcpu_reclaim_populated() time
   2238	 * to move fully free chunks to the active list to be freed if
   2239	 * appropriate.
   2240	 */
   2241	mutex_lock(&pcpu_alloc_mutex);
   2242	spin_lock_irq(&pcpu_lock);
   2243
   2244	pcpu_balance_free(false);
   2245	pcpu_reclaim_populated();
   2246	pcpu_balance_populated();
   2247	pcpu_balance_free(true);
   2248
   2249	spin_unlock_irq(&pcpu_lock);
   2250	mutex_unlock(&pcpu_alloc_mutex);
   2251}
   2252
   2253/**
   2254 * free_percpu - free percpu area
   2255 * @ptr: pointer to area to free
   2256 *
   2257 * Free percpu area @ptr.
   2258 *
   2259 * CONTEXT:
   2260 * Can be called from atomic context.
   2261 */
   2262void free_percpu(void __percpu *ptr)
   2263{
   2264	void *addr;
   2265	struct pcpu_chunk *chunk;
   2266	unsigned long flags;
   2267	int size, off;
   2268	bool need_balance = false;
   2269
   2270	if (!ptr)
   2271		return;
   2272
   2273	kmemleak_free_percpu(ptr);
   2274
   2275	addr = __pcpu_ptr_to_addr(ptr);
   2276
   2277	spin_lock_irqsave(&pcpu_lock, flags);
   2278
   2279	chunk = pcpu_chunk_addr_search(addr);
   2280	off = addr - chunk->base_addr;
   2281
   2282	size = pcpu_free_area(chunk, off);
   2283
   2284	pcpu_memcg_free_hook(chunk, off, size);
   2285
   2286	/*
    2287	 * If there is more than one fully free chunk, wake up the grim reaper.
   2288	 * If the chunk is isolated, it may be in the process of being
   2289	 * reclaimed.  Let reclaim manage cleaning up of that chunk.
   2290	 */
   2291	if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
   2292		struct pcpu_chunk *pos;
   2293
   2294		list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
   2295			if (pos != chunk) {
   2296				need_balance = true;
   2297				break;
   2298			}
   2299	} else if (pcpu_should_reclaim_chunk(chunk)) {
   2300		pcpu_isolate_chunk(chunk);
   2301		need_balance = true;
   2302	}
   2303
   2304	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
   2305
   2306	spin_unlock_irqrestore(&pcpu_lock, flags);
   2307
   2308	if (need_balance)
   2309		pcpu_schedule_balance_work();
   2310}
   2311EXPORT_SYMBOL_GPL(free_percpu);
   2312
   2313bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
   2314{
   2315#ifdef CONFIG_SMP
   2316	const size_t static_size = __per_cpu_end - __per_cpu_start;
   2317	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
   2318	unsigned int cpu;
   2319
   2320	for_each_possible_cpu(cpu) {
   2321		void *start = per_cpu_ptr(base, cpu);
   2322		void *va = (void *)addr;
   2323
   2324		if (va >= start && va < start + static_size) {
   2325			if (can_addr) {
   2326				*can_addr = (unsigned long) (va - start);
   2327				*can_addr += (unsigned long)
   2328					per_cpu_ptr(base, get_boot_cpu_id());
   2329			}
   2330			return true;
   2331		}
   2332	}
   2333#endif
   2334	/* on UP, can't distinguish from other static vars, always false */
   2335	return false;
   2336}
   2337
   2338/**
   2339 * is_kernel_percpu_address - test whether address is from static percpu area
   2340 * @addr: address to test
   2341 *
   2342 * Test whether @addr belongs to in-kernel static percpu area.  Module
   2343 * static percpu areas are not considered.  For those, use
   2344 * is_module_percpu_address().
   2345 *
   2346 * RETURNS:
   2347 * %true if @addr is from in-kernel static percpu area, %false otherwise.
   2348 */
   2349bool is_kernel_percpu_address(unsigned long addr)
   2350{
   2351	return __is_kernel_percpu_address(addr, NULL);
   2352}
   2353
   2354/**
   2355 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
   2356 * @addr: the address to be converted to physical address
   2357 *
    2358 * Given @addr, which is a dereferenceable address obtained via one of
   2359 * percpu access macros, this function translates it into its physical
   2360 * address.  The caller is responsible for ensuring @addr stays valid
   2361 * until this function finishes.
   2362 *
    2363 * The percpu allocator has a special setup for the first chunk, which currently
    2364 * supports either embedding in the linear address space or vmalloc mapping;
    2365 * from the second chunk onwards, the backing allocator (currently either vm or
    2366 * km) provides the translation.
   2367 *
    2368 * The address could be translated simply without checking whether it falls into
    2369 * the first chunk.  But the current code better reflects how the percpu
    2370 * allocator actually works, and the verification can discover bugs both in the
    2371 * percpu allocator itself and in per_cpu_ptr_to_phys() callers.  So the current
    2372 * code is kept.
   2373 *
   2374 * RETURNS:
   2375 * The physical address for @addr.
   2376 */
   2377phys_addr_t per_cpu_ptr_to_phys(void *addr)
   2378{
   2379	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
   2380	bool in_first_chunk = false;
   2381	unsigned long first_low, first_high;
   2382	unsigned int cpu;
   2383
   2384	/*
   2385	 * The following test on unit_low/high isn't strictly
   2386	 * necessary but will speed up lookups of addresses which
   2387	 * aren't in the first chunk.
   2388	 *
   2389	 * The address check is against full chunk sizes.  pcpu_base_addr
   2390	 * points to the beginning of the first chunk including the
   2391	 * static region.  Assumes good intent as the first chunk may
   2392	 * not be full (ie. < pcpu_unit_pages in size).
   2393	 */
   2394	first_low = (unsigned long)pcpu_base_addr +
   2395		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
   2396	first_high = (unsigned long)pcpu_base_addr +
   2397		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
   2398	if ((unsigned long)addr >= first_low &&
   2399	    (unsigned long)addr < first_high) {
   2400		for_each_possible_cpu(cpu) {
   2401			void *start = per_cpu_ptr(base, cpu);
   2402
   2403			if (addr >= start && addr < start + pcpu_unit_size) {
   2404				in_first_chunk = true;
   2405				break;
   2406			}
   2407		}
   2408	}
   2409
   2410	if (in_first_chunk) {
   2411		if (!is_vmalloc_addr(addr))
   2412			return __pa(addr);
   2413		else
   2414			return page_to_phys(vmalloc_to_page(addr)) +
   2415			       offset_in_page(addr);
   2416	} else
   2417		return page_to_phys(pcpu_addr_to_page(addr)) +
   2418		       offset_in_page(addr);
   2419}
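/*
 * Illustrative use only: a caller holding a live dynamic percpu pointer
 * would typically resolve one unit's physical address as
 *
 *	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(p, cpu));
 */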
   2420
   2421/**
   2422 * pcpu_alloc_alloc_info - allocate percpu allocation info
   2423 * @nr_groups: the number of groups
   2424 * @nr_units: the number of units
   2425 *
   2426 * Allocate ai which is large enough for @nr_groups groups containing
   2427 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
   2428 * cpu_map array which is long enough for @nr_units and filled with
   2429 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
   2430 * pointer of other groups.
   2431 *
   2432 * RETURNS:
   2433 * Pointer to the allocated pcpu_alloc_info on success, NULL on
   2434 * failure.
   2435 */
   2436struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
   2437						      int nr_units)
   2438{
   2439	struct pcpu_alloc_info *ai;
   2440	size_t base_size, ai_size;
   2441	void *ptr;
   2442	int unit;
   2443
   2444	base_size = ALIGN(struct_size(ai, groups, nr_groups),
   2445			  __alignof__(ai->groups[0].cpu_map[0]));
   2446	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
   2447
   2448	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
   2449	if (!ptr)
   2450		return NULL;
   2451	ai = ptr;
   2452	ptr += base_size;
   2453
   2454	ai->groups[0].cpu_map = ptr;
   2455
   2456	for (unit = 0; unit < nr_units; unit++)
   2457		ai->groups[0].cpu_map[unit] = NR_CPUS;
   2458
   2459	ai->nr_groups = nr_groups;
   2460	ai->__ai_size = PFN_ALIGN(ai_size);
   2461
   2462	return ai;
   2463}
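/*
 * Resulting layout (sketch): the single memblock allocation above holds
 *
 *	| pcpu_alloc_info + groups[nr_groups] | cpu_map[nr_units] |
 *	  <----------- base_size -----------> <- rest of ai_size ->
 *
 * with groups[0].cpu_map pointing at the trailing array; the other groups'
 * cpu_map pointers are set up later by the caller.
 */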
   2464
   2465/**
   2466 * pcpu_free_alloc_info - free percpu allocation info
   2467 * @ai: pcpu_alloc_info to free
   2468 *
   2469 * Free @ai which was allocated by pcpu_alloc_alloc_info().
   2470 */
   2471void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
   2472{
   2473	memblock_free(ai, ai->__ai_size);
   2474}
   2475
   2476/**
   2477 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
   2478 * @lvl: loglevel
   2479 * @ai: allocation info to dump
   2480 *
   2481 * Print out information about @ai using loglevel @lvl.
   2482 */
   2483static void pcpu_dump_alloc_info(const char *lvl,
   2484				 const struct pcpu_alloc_info *ai)
   2485{
   2486	int group_width = 1, cpu_width = 1, width;
   2487	char empty_str[] = "--------";
   2488	int alloc = 0, alloc_end = 0;
   2489	int group, v;
   2490	int upa, apl;	/* units per alloc, allocs per line */
   2491
   2492	v = ai->nr_groups;
   2493	while (v /= 10)
   2494		group_width++;
   2495
   2496	v = num_possible_cpus();
   2497	while (v /= 10)
   2498		cpu_width++;
   2499	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
   2500
   2501	upa = ai->alloc_size / ai->unit_size;
   2502	width = upa * (cpu_width + 1) + group_width + 3;
   2503	apl = rounddown_pow_of_two(max(60 / width, 1));
   2504
   2505	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
   2506	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
   2507	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
   2508
   2509	for (group = 0; group < ai->nr_groups; group++) {
   2510		const struct pcpu_group_info *gi = &ai->groups[group];
   2511		int unit = 0, unit_end = 0;
   2512
   2513		BUG_ON(gi->nr_units % upa);
   2514		for (alloc_end += gi->nr_units / upa;
   2515		     alloc < alloc_end; alloc++) {
   2516			if (!(alloc % apl)) {
   2517				pr_cont("\n");
   2518				printk("%spcpu-alloc: ", lvl);
   2519			}
   2520			pr_cont("[%0*d] ", group_width, group);
   2521
   2522			for (unit_end += upa; unit < unit_end; unit++)
   2523				if (gi->cpu_map[unit] != NR_CPUS)
   2524					pr_cont("%0*d ",
   2525						cpu_width, gi->cpu_map[unit]);
   2526				else
   2527					pr_cont("%s ", empty_str);
   2528		}
   2529	}
   2530	pr_cont("\n");
   2531}
   2532
   2533/**
   2534 * pcpu_setup_first_chunk - initialize the first percpu chunk
    2535 * @ai: pcpu_alloc_info describing how the percpu area is shaped
   2536 * @base_addr: mapped address
   2537 *
   2538 * Initialize the first percpu chunk which contains the kernel static
   2539 * percpu area.  This function is to be called from arch percpu area
   2540 * setup path.
   2541 *
   2542 * @ai contains all information necessary to initialize the first
   2543 * chunk and prime the dynamic percpu allocator.
   2544 *
   2545 * @ai->static_size is the size of static percpu area.
   2546 *
   2547 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
   2548 * reserve after the static area in the first chunk.  This reserves
   2549 * the first chunk such that it's available only through reserved
   2550 * percpu allocation.  This is primarily used to serve module percpu
   2551 * static areas on architectures where the addressing model has
   2552 * limited offset range for symbol relocations to guarantee module
   2553 * percpu symbols fall inside the relocatable range.
   2554 *
   2555 * @ai->dyn_size determines the number of bytes available for dynamic
   2556 * allocation in the first chunk.  The area between @ai->static_size +
   2557 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
   2558 *
   2559 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
   2560 * and equal to or larger than @ai->static_size + @ai->reserved_size +
   2561 * @ai->dyn_size.
   2562 *
   2563 * @ai->atom_size is the allocation atom size and used as alignment
   2564 * for vm areas.
   2565 *
   2566 * @ai->alloc_size is the allocation size and always multiple of
   2567 * @ai->atom_size.  This is larger than @ai->atom_size if
   2568 * @ai->unit_size is larger than @ai->atom_size.
   2569 *
   2570 * @ai->nr_groups and @ai->groups describe virtual memory layout of
   2571 * percpu areas.  Units which should be colocated are put into the
   2572 * same group.  Dynamic VM areas will be allocated according to these
   2573 * groupings.  If @ai->nr_groups is zero, a single group containing
   2574 * all units is assumed.
   2575 *
   2576 * The caller should have mapped the first chunk at @base_addr and
   2577 * copied static data to each unit.
   2578 *
   2579 * The first chunk will always contain a static and a dynamic region.
   2580 * However, the static region is not managed by any chunk.  If the first
   2581 * chunk also contains a reserved region, it is served by two chunks -
   2582 * one for the reserved region and one for the dynamic region.  They
   2583 * share the same vm, but use offset regions in the area allocation map.
   2584 * The chunk serving the dynamic region is circulated in the chunk slots
   2585 * and available for dynamic allocation like any other chunk.
   2586 */
   2587void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
   2588				   void *base_addr)
   2589{
   2590	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
   2591	size_t static_size, dyn_size;
   2592	struct pcpu_chunk *chunk;
   2593	unsigned long *group_offsets;
   2594	size_t *group_sizes;
   2595	unsigned long *unit_off;
   2596	unsigned int cpu;
   2597	int *unit_map;
   2598	int group, unit, i;
   2599	int map_size;
   2600	unsigned long tmp_addr;
   2601	size_t alloc_size;
   2602
   2603#define PCPU_SETUP_BUG_ON(cond)	do {					\
   2604	if (unlikely(cond)) {						\
   2605		pr_emerg("failed to initialize, %s\n", #cond);		\
   2606		pr_emerg("cpu_possible_mask=%*pb\n",			\
   2607			 cpumask_pr_args(cpu_possible_mask));		\
   2608		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
   2609		BUG();							\
   2610	}								\
   2611} while (0)
   2612
   2613	/* sanity checks */
   2614	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
   2615#ifdef CONFIG_SMP
   2616	PCPU_SETUP_BUG_ON(!ai->static_size);
   2617	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
   2618#endif
   2619	PCPU_SETUP_BUG_ON(!base_addr);
   2620	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
   2621	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
   2622	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
   2623	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
   2624	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
   2625	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
   2626	PCPU_SETUP_BUG_ON(!ai->dyn_size);
   2627	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
   2628	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
   2629			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
   2630	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
   2631
   2632	/* process group information and build config tables accordingly */
   2633	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
   2634	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
   2635	if (!group_offsets)
   2636		panic("%s: Failed to allocate %zu bytes\n", __func__,
   2637		      alloc_size);
   2638
   2639	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
   2640	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
   2641	if (!group_sizes)
   2642		panic("%s: Failed to allocate %zu bytes\n", __func__,
   2643		      alloc_size);
   2644
   2645	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
   2646	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
   2647	if (!unit_map)
   2648		panic("%s: Failed to allocate %zu bytes\n", __func__,
   2649		      alloc_size);
   2650
   2651	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
   2652	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
   2653	if (!unit_off)
   2654		panic("%s: Failed to allocate %zu bytes\n", __func__,
   2655		      alloc_size);
   2656
   2657	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
   2658		unit_map[cpu] = UINT_MAX;
   2659
   2660	pcpu_low_unit_cpu = NR_CPUS;
   2661	pcpu_high_unit_cpu = NR_CPUS;
   2662
   2663	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
   2664		const struct pcpu_group_info *gi = &ai->groups[group];
   2665
   2666		group_offsets[group] = gi->base_offset;
   2667		group_sizes[group] = gi->nr_units * ai->unit_size;
   2668
   2669		for (i = 0; i < gi->nr_units; i++) {
   2670			cpu = gi->cpu_map[i];
   2671			if (cpu == NR_CPUS)
   2672				continue;
   2673
   2674			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
   2675			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
   2676			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
   2677
   2678			unit_map[cpu] = unit + i;
   2679			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
   2680
   2681			/* determine low/high unit_cpu */
   2682			if (pcpu_low_unit_cpu == NR_CPUS ||
   2683			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
   2684				pcpu_low_unit_cpu = cpu;
   2685			if (pcpu_high_unit_cpu == NR_CPUS ||
   2686			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
   2687				pcpu_high_unit_cpu = cpu;
   2688		}
   2689	}
   2690	pcpu_nr_units = unit;
   2691
   2692	for_each_possible_cpu(cpu)
   2693		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
   2694
   2695	/* we're done parsing the input, undefine BUG macro and dump config */
   2696#undef PCPU_SETUP_BUG_ON
   2697	pcpu_dump_alloc_info(KERN_DEBUG, ai);
   2698
   2699	pcpu_nr_groups = ai->nr_groups;
   2700	pcpu_group_offsets = group_offsets;
   2701	pcpu_group_sizes = group_sizes;
   2702	pcpu_unit_map = unit_map;
   2703	pcpu_unit_offsets = unit_off;
   2704
   2705	/* determine basic parameters */
   2706	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
   2707	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
   2708	pcpu_atom_size = ai->atom_size;
   2709	pcpu_chunk_struct_size = struct_size(chunk, populated,
   2710					     BITS_TO_LONGS(pcpu_unit_pages));
   2711
   2712	pcpu_stats_save_ai(ai);
   2713
   2714	/*
   2715	 * Allocate chunk slots.  The slots after the active slots are:
   2716	 *   sidelined_slot - isolated, depopulated chunks
   2717	 *   free_slot - fully free chunks
   2718	 *   to_depopulate_slot - isolated, chunks to depopulate
   2719	 */
   2720	pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
   2721	pcpu_free_slot = pcpu_sidelined_slot + 1;
   2722	pcpu_to_depopulate_slot = pcpu_free_slot + 1;
   2723	pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
   2724	pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
   2725					  sizeof(pcpu_chunk_lists[0]),
   2726					  SMP_CACHE_BYTES);
   2727	if (!pcpu_chunk_lists)
   2728		panic("%s: Failed to allocate %zu bytes\n", __func__,
   2729		      pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));
   2730
   2731	for (i = 0; i < pcpu_nr_slots; i++)
   2732		INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
   2733
   2734	/*
   2735	 * The end of the static region needs to be aligned with the
   2736	 * minimum allocation size as this offsets the reserved and
   2737	 * dynamic region.  The first chunk ends page aligned by
   2738	 * expanding the dynamic region, therefore the dynamic region
   2739	 * can be shrunk to compensate while still staying above the
   2740	 * configured sizes.
   2741	 */
   2742	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
   2743	dyn_size = ai->dyn_size - (static_size - ai->static_size);
   2744
   2745	/*
   2746	 * Initialize first chunk.
   2747	 * If the reserved_size is non-zero, this initializes the reserved
   2748	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
   2749	 * and the dynamic region is initialized here.  The first chunk,
   2750	 * pcpu_first_chunk, will always point to the chunk that serves
   2751	 * the dynamic region.
   2752	 */
   2753	tmp_addr = (unsigned long)base_addr + static_size;
   2754	map_size = ai->reserved_size ?: dyn_size;
   2755	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
   2756
   2757	/* init dynamic chunk if necessary */
   2758	if (ai->reserved_size) {
   2759		pcpu_reserved_chunk = chunk;
   2760
   2761		tmp_addr = (unsigned long)base_addr + static_size +
   2762			   ai->reserved_size;
   2763		map_size = dyn_size;
   2764		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
   2765	}
   2766
   2767	/* link the first chunk in */
   2768	pcpu_first_chunk = chunk;
   2769	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
   2770	pcpu_chunk_relocate(pcpu_first_chunk, -1);
   2771
   2772	/* include all regions of the first chunk */
   2773	pcpu_nr_populated += PFN_DOWN(size_sum);
   2774
   2775	pcpu_stats_chunk_alloc();
   2776	trace_percpu_create_chunk(base_addr);
   2777
   2778	/* we're done */
   2779	pcpu_base_addr = base_addr;
   2780}
   2781
   2782#ifdef CONFIG_SMP
   2783
   2784const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
   2785	[PCPU_FC_AUTO]	= "auto",
   2786	[PCPU_FC_EMBED]	= "embed",
   2787	[PCPU_FC_PAGE]	= "page",
   2788};
   2789
   2790enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
   2791
   2792static int __init percpu_alloc_setup(char *str)
   2793{
   2794	if (!str)
   2795		return -EINVAL;
   2796
   2797	if (0)
   2798		/* nada */;
   2799#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
   2800	else if (!strcmp(str, "embed"))
   2801		pcpu_chosen_fc = PCPU_FC_EMBED;
   2802#endif
   2803#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
   2804	else if (!strcmp(str, "page"))
   2805		pcpu_chosen_fc = PCPU_FC_PAGE;
   2806#endif
   2807	else
   2808		pr_warn("unknown allocator %s specified\n", str);
   2809
   2810	return 0;
   2811}
   2812early_param("percpu_alloc", percpu_alloc_setup);
   2813
   2814/*
   2815 * pcpu_embed_first_chunk() is used by the generic percpu setup.
   2816 * Build it if needed by the arch config or the generic setup is going
   2817 * to be used.
   2818 */
   2819#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
   2820	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
   2821#define BUILD_EMBED_FIRST_CHUNK
   2822#endif
   2823
   2824/* build pcpu_page_first_chunk() iff needed by the arch config */
   2825#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
   2826#define BUILD_PAGE_FIRST_CHUNK
   2827#endif
   2828
   2829/* pcpu_build_alloc_info() is used by both embed and page first chunk */
   2830#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
   2831/**
   2832 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
   2833 * @reserved_size: the size of reserved percpu area in bytes
   2834 * @dyn_size: minimum free size for dynamic allocation in bytes
   2835 * @atom_size: allocation atom size
   2836 * @cpu_distance_fn: callback to determine distance between cpus, optional
   2837 *
   2838 * This function determines grouping of units, their mappings to cpus
   2839 * and other parameters considering needed percpu size, allocation
   2840 * atom size and distances between CPUs.
   2841 *
   2842 * Groups are always multiples of atom size and CPUs which are of
   2843 * LOCAL_DISTANCE both ways are grouped together and share space for
   2844 * units in the same group.  The returned configuration is guaranteed
   2845 * to have CPUs on different nodes on different groups and >=75% usage
   2846 * of allocated virtual address space.
   2847 *
   2848 * RETURNS:
   2849 * On success, pointer to the new allocation_info is returned.  On
   2850 * failure, ERR_PTR value is returned.
   2851 */
   2852static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
   2853				size_t reserved_size, size_t dyn_size,
   2854				size_t atom_size,
   2855				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
   2856{
   2857	static int group_map[NR_CPUS] __initdata;
   2858	static int group_cnt[NR_CPUS] __initdata;
   2859	static struct cpumask mask __initdata;
   2860	const size_t static_size = __per_cpu_end - __per_cpu_start;
   2861	int nr_groups = 1, nr_units = 0;
   2862	size_t size_sum, min_unit_size, alloc_size;
   2863	int upa, max_upa, best_upa;	/* units_per_alloc */
   2864	int last_allocs, group, unit;
   2865	unsigned int cpu, tcpu;
   2866	struct pcpu_alloc_info *ai;
   2867	unsigned int *cpu_map;
   2868
   2869	/* this function may be called multiple times */
   2870	memset(group_map, 0, sizeof(group_map));
   2871	memset(group_cnt, 0, sizeof(group_cnt));
   2872	cpumask_clear(&mask);
   2873
   2874	/* calculate size_sum and ensure dyn_size is enough for early alloc */
   2875	size_sum = PFN_ALIGN(static_size + reserved_size +
   2876			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
   2877	dyn_size = size_sum - static_size - reserved_size;
   2878
   2879	/*
   2880	 * Determine min_unit_size, alloc_size and max_upa such that
   2881	 * alloc_size is multiple of atom_size and is the smallest
   2882	 * which can accommodate 4k aligned segments which are equal to
   2883	 * or larger than min_unit_size.
   2884	 */
   2885	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
   2886
   2887	/* determine the maximum # of units that can fit in an allocation */
   2888	alloc_size = roundup(min_unit_size, atom_size);
   2889	upa = alloc_size / min_unit_size;
   2890	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
   2891		upa--;
   2892	max_upa = upa;
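	/*
	 * For illustration (values not taken from this file): with 4K pages,
	 * min_unit_size == 64K and atom_size == 2M, alloc_size becomes 2M and
	 * the loop above keeps max_upa == 32 because 2M / 32 == 64K is page
	 * aligned and divides alloc_size evenly.
	 */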
   2893
   2894	cpumask_copy(&mask, cpu_possible_mask);
   2895
   2896	/* group cpus according to their proximity */
   2897	for (group = 0; !cpumask_empty(&mask); group++) {
   2898		/* pop the group's first cpu */
   2899		cpu = cpumask_first(&mask);
   2900		group_map[cpu] = group;
   2901		group_cnt[group]++;
   2902		cpumask_clear_cpu(cpu, &mask);
   2903
   2904		for_each_cpu(tcpu, &mask) {
   2905			if (!cpu_distance_fn ||
   2906			    (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
   2907			     cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
   2908				group_map[tcpu] = group;
   2909				group_cnt[group]++;
   2910				cpumask_clear_cpu(tcpu, &mask);
   2911			}
   2912		}
   2913	}
   2914	nr_groups = group;
   2915
   2916	/*
   2917	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
   2918	 * Expand the unit_size until we use >= 75% of the units allocated.
   2919	 * Related to atom_size, which could be much larger than the unit_size.
   2920	 */
   2921	last_allocs = INT_MAX;
   2922	best_upa = 0;
   2923	for (upa = max_upa; upa; upa--) {
   2924		int allocs = 0, wasted = 0;
   2925
   2926		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
   2927			continue;
   2928
   2929		for (group = 0; group < nr_groups; group++) {
   2930			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
   2931			allocs += this_allocs;
   2932			wasted += this_allocs * upa - group_cnt[group];
   2933		}
   2934
   2935		/*
   2936		 * Don't accept if wastage is over 1/3.  The
   2937		 * greater-than comparison ensures upa==1 always
   2938		 * passes the following check.
   2939		 */
   2940		if (wasted > num_possible_cpus() / 3)
   2941			continue;
   2942
   2943		/* and then don't consume more memory */
   2944		if (allocs > last_allocs)
   2945			break;
   2946		last_allocs = allocs;
   2947		best_upa = upa;
   2948	}
   2949	BUG_ON(!best_upa);
   2950	upa = best_upa;
   2951
   2952	/* allocate and fill alloc_info */
   2953	for (group = 0; group < nr_groups; group++)
   2954		nr_units += roundup(group_cnt[group], upa);
   2955
   2956	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
   2957	if (!ai)
   2958		return ERR_PTR(-ENOMEM);
   2959	cpu_map = ai->groups[0].cpu_map;
   2960
   2961	for (group = 0; group < nr_groups; group++) {
   2962		ai->groups[group].cpu_map = cpu_map;
   2963		cpu_map += roundup(group_cnt[group], upa);
   2964	}
   2965
   2966	ai->static_size = static_size;
   2967	ai->reserved_size = reserved_size;
   2968	ai->dyn_size = dyn_size;
   2969	ai->unit_size = alloc_size / upa;
   2970	ai->atom_size = atom_size;
   2971	ai->alloc_size = alloc_size;
   2972
   2973	for (group = 0, unit = 0; group < nr_groups; group++) {
   2974		struct pcpu_group_info *gi = &ai->groups[group];
   2975
   2976		/*
   2977		 * Initialize base_offset as if all groups are located
   2978		 * back-to-back.  The caller should update this to
   2979		 * reflect actual allocation.
   2980		 */
   2981		gi->base_offset = unit * ai->unit_size;
   2982
   2983		for_each_possible_cpu(cpu)
   2984			if (group_map[cpu] == group)
   2985				gi->cpu_map[gi->nr_units++] = cpu;
   2986		gi->nr_units = roundup(gi->nr_units, upa);
   2987		unit += gi->nr_units;
   2988	}
   2989	BUG_ON(unit != nr_units);
   2990
   2991	return ai;
   2992}
   2993
   2994static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
   2995				   pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
   2996{
   2997	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
   2998#ifdef CONFIG_NUMA
   2999	int node = NUMA_NO_NODE;
   3000	void *ptr;
   3001
   3002	if (cpu_to_nd_fn)
   3003		node = cpu_to_nd_fn(cpu);
   3004
   3005	if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
   3006		ptr = memblock_alloc_from(size, align, goal);
   3007		pr_info("cpu %d has no node %d or node-local memory\n",
   3008			cpu, node);
   3009		pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n",
   3010			 cpu, size, (u64)__pa(ptr));
   3011	} else {
   3012		ptr = memblock_alloc_try_nid(size, align, goal,
   3013					     MEMBLOCK_ALLOC_ACCESSIBLE,
   3014					     node);
   3015
   3016		pr_debug("per cpu data for cpu%d %zu bytes on node%d at 0x%llx\n",
   3017			 cpu, size, node, (u64)__pa(ptr));
   3018	}
   3019	return ptr;
   3020#else
   3021	return memblock_alloc_from(size, align, goal);
   3022#endif
   3023}
   3024
   3025static void __init pcpu_fc_free(void *ptr, size_t size)
   3026{
   3027	memblock_free(ptr, size);
   3028}
   3029#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
   3030
   3031#if defined(BUILD_EMBED_FIRST_CHUNK)
   3032/**
   3033 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
   3034 * @reserved_size: the size of reserved percpu area in bytes
   3035 * @dyn_size: minimum free size for dynamic allocation in bytes
   3036 * @atom_size: allocation atom size
   3037 * @cpu_distance_fn: callback to determine distance between cpus, optional
   3038 * @cpu_to_nd_fn: callback to convert cpu to its node, optional
   3039 *
   3040 * This is a helper to ease setting up the embedded first percpu chunk and
   3041 * can be called where pcpu_setup_first_chunk() is expected.
   3042 *
   3043 * If this function is used to set up the first chunk, the chunk is allocated
   3044 * by calling pcpu_fc_alloc() and used as-is without being mapped into the
   3045 * vmalloc area.  Allocations are always whole multiples of @atom_size
   3046 * aligned to @atom_size.
   3047 *
   3048 * This enables the first chunk to piggyback on the linear physical
   3049 * mapping, which often uses larger page sizes.  Please note that this
   3050 * can result in a very sparse cpu->unit mapping on NUMA machines, thus
   3051 * requiring a large vmalloc address space.  Don't use this allocator if
   3052 * vmalloc space is not orders of magnitude larger than the distances
   3053 * between node memory addresses (i.e. 32-bit NUMA machines).
   3054 *
   3055 * @dyn_size specifies the minimum dynamic area size.
   3056 *
   3057 * If the needed size is smaller than the minimum or specified unit
   3058 * size, the leftover is returned using pcpu_fc_free.
   3059 *
   3060 * RETURNS:
   3061 * 0 on success, -errno on failure.
   3062 */
   3063int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
   3064				  size_t atom_size,
   3065				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
   3066				  pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
   3067{
   3068	void *base = (void *)ULONG_MAX;
   3069	void **areas = NULL;
   3070	struct pcpu_alloc_info *ai;
   3071	size_t size_sum, areas_size;
   3072	unsigned long max_distance;
   3073	int group, i, highest_group, rc = 0;
   3074
   3075	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
   3076				   cpu_distance_fn);
   3077	if (IS_ERR(ai))
   3078		return PTR_ERR(ai);
   3079
   3080	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
   3081	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
   3082
   3083	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
   3084	if (!areas) {
   3085		rc = -ENOMEM;
   3086		goto out_free;
   3087	}
   3088
   3089	/* allocate, copy and determine base address & max_distance */
   3090	highest_group = 0;
   3091	for (group = 0; group < ai->nr_groups; group++) {
   3092		struct pcpu_group_info *gi = &ai->groups[group];
   3093		unsigned int cpu = NR_CPUS;
   3094		void *ptr;
   3095
   3096		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
   3097			cpu = gi->cpu_map[i];
   3098		BUG_ON(cpu == NR_CPUS);
   3099
   3100		/* allocate space for the whole group */
   3101		ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn);
   3102		if (!ptr) {
   3103			rc = -ENOMEM;
   3104			goto out_free_areas;
   3105		}
   3106		/* kmemleak tracks the percpu allocations separately */
   3107		kmemleak_free(ptr);
   3108		areas[group] = ptr;
   3109
   3110		base = min(ptr, base);
   3111		if (ptr > areas[highest_group])
   3112			highest_group = group;
   3113	}
   3114	max_distance = areas[highest_group] - base;
   3115	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
   3116
   3117	/* warn if maximum distance is further than 75% of vmalloc space */
   3118	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
   3119		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
   3120				max_distance, VMALLOC_TOTAL);
   3121#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
   3122		/* and fail if we have fallback */
   3123		rc = -EINVAL;
   3124		goto out_free_areas;
   3125#endif
   3126	}
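
       	/*
       	 * Numeric illustration (hypothetical): with roughly 128 MB of
       	 * vmalloc space on a 32-bit machine, two groups whose bootmem
       	 * allocations land 1 GB apart give a max_distance of about 1 GB.
       	 * Every later dynamic chunk must reproduce the same group offsets
       	 * inside the vmalloc area, so such a span cannot be covered and
       	 * the page-mapped fallback (when built) is used instead.
       	 */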
   3127
   3128	/*
   3129	 * Copy data and free unused parts.  This should happen after all
   3130	 * allocations are complete; otherwise, we may end up with
   3131	 * overlapping groups.
   3132	 */
   3133	for (group = 0; group < ai->nr_groups; group++) {
   3134		struct pcpu_group_info *gi = &ai->groups[group];
   3135		void *ptr = areas[group];
   3136
   3137		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
   3138			if (gi->cpu_map[i] == NR_CPUS) {
   3139				/* unused unit, free whole */
   3140				pcpu_fc_free(ptr, ai->unit_size);
   3141				continue;
   3142			}
   3143			/* copy and return the unused part */
   3144			memcpy(ptr, __per_cpu_load, ai->static_size);
   3145			pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
   3146		}
   3147	}
   3148
   3149	/* base address is now known, determine group base offsets */
   3150	for (group = 0; group < ai->nr_groups; group++) {
   3151		ai->groups[group].base_offset = areas[group] - base;
   3152	}
   3153
   3154	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
   3155		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
   3156		ai->dyn_size, ai->unit_size);
   3157
   3158	pcpu_setup_first_chunk(ai, base);
   3159	goto out_free;
   3160
   3161out_free_areas:
   3162	for (group = 0; group < ai->nr_groups; group++)
   3163		if (areas[group])
   3164			pcpu_fc_free(areas[group],
   3165				ai->groups[group].nr_units * ai->unit_size);
   3166out_free:
   3167	pcpu_free_alloc_info(ai);
   3168	if (areas)
   3169		memblock_free(areas, areas_size);
   3170	return rc;
   3171}
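
       /*
        * Sketch (illustrative, not taken from this file): a typical
        * NUMA-aware caller.  pcpu_cpu_distance() is a hypothetical helper
        * name; early_cpu_to_node(), PMD_SIZE and the distance constants
        * follow common arch practice.
        *
        *   static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
        *   {
        *           return early_cpu_to_node(from) == early_cpu_to_node(to) ?
        *                   LOCAL_DISTANCE : REMOTE_DISTANCE;
        *   }
        *
        *   rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
        *                               PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
        *                               pcpu_cpu_distance, early_cpu_to_node);
        *   if (rc < 0)
        *           panic("percpu: embedded first chunk setup failed (%d)", rc);
        */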
   3172#endif /* BUILD_EMBED_FIRST_CHUNK */
   3173
   3174#ifdef BUILD_PAGE_FIRST_CHUNK
   3175#include <asm/pgalloc.h>
   3176
   3177#ifndef P4D_TABLE_SIZE
   3178#define P4D_TABLE_SIZE PAGE_SIZE
   3179#endif
   3180
   3181#ifndef PUD_TABLE_SIZE
   3182#define PUD_TABLE_SIZE PAGE_SIZE
   3183#endif
   3184
   3185#ifndef PMD_TABLE_SIZE
   3186#define PMD_TABLE_SIZE PAGE_SIZE
   3187#endif
   3188
   3189#ifndef PTE_TABLE_SIZE
   3190#define PTE_TABLE_SIZE PAGE_SIZE
   3191#endif
   3192void __init __weak pcpu_populate_pte(unsigned long addr)
   3193{
   3194	pgd_t *pgd = pgd_offset_k(addr);
   3195	p4d_t *p4d;
   3196	pud_t *pud;
   3197	pmd_t *pmd;
   3198
   3199	if (pgd_none(*pgd)) {
   3200		p4d_t *new;
   3201
   3202		new = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
   3203		if (!new)
   3204			goto err_alloc;
   3205		pgd_populate(&init_mm, pgd, new);
   3206	}
   3207
   3208	p4d = p4d_offset(pgd, addr);
   3209	if (p4d_none(*p4d)) {
   3210		pud_t *new;
   3211
   3212		new = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
   3213		if (!new)
   3214			goto err_alloc;
   3215		p4d_populate(&init_mm, p4d, new);
   3216	}
   3217
   3218	pud = pud_offset(p4d, addr);
   3219	if (pud_none(*pud)) {
   3220		pmd_t *new;
   3221
   3222		new = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
   3223		if (!new)
   3224			goto err_alloc;
   3225		pud_populate(&init_mm, pud, new);
   3226	}
   3227
   3228	pmd = pmd_offset(pud, addr);
   3229	if (!pmd_present(*pmd)) {
   3230		pte_t *new;
   3231
   3232		new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
   3233		if (!new)
   3234			goto err_alloc;
   3235		pmd_populate_kernel(&init_mm, pmd, new);
   3236	}
   3237
   3238	return;
   3239
   3240err_alloc:
   3241	panic("%s: Failed to allocate memory\n", __func__);
   3242}
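
       /*
        * Note (illustrative): once pcpu_populate_pte() has run for an
        * address, every table level down to the PTE page exists, so
        * installing the leaf mapping later boils down to roughly the
        * following; the real work is done by __pcpu_map_pages():
        *
        *   pte_t *pte = pte_offset_kernel(pmd_off_k(addr), addr);
        *
        *   set_pte_at(&init_mm, addr, pte, mk_pte(page, PAGE_KERNEL));
        */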
   3243
   3244/**
   3245 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
   3246 * @reserved_size: the size of reserved percpu area in bytes
   3247 * @cpu_to_nd_fn: callback to convert cpu to its node, optional
   3248 *
   3249 * This is a helper to ease setting up the page-remapped first percpu
   3250 * chunk and can be called where pcpu_setup_first_chunk() is expected.
   3251 *
   3252 * This is the basic allocator.  The static percpu area is allocated
   3253 * page-by-page into the vmalloc area.
   3254 *
   3255 * RETURNS:
   3256 * 0 on success, -errno on failure.
   3257 */
   3258int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
   3259{
   3260	static struct vm_struct vm;
   3261	struct pcpu_alloc_info *ai;
   3262	char psize_str[16];
   3263	int unit_pages;
   3264	size_t pages_size;
   3265	struct page **pages;
   3266	int unit, i, j, rc = 0;
   3267	int upa;
   3268	int nr_g0_units;
   3269
   3270	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
   3271
   3272	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
   3273	if (IS_ERR(ai))
   3274		return PTR_ERR(ai);
   3275	BUG_ON(ai->nr_groups != 1);
   3276	upa = ai->alloc_size/ai->unit_size;
   3277	nr_g0_units = roundup(num_possible_cpus(), upa);
   3278	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
   3279		pcpu_free_alloc_info(ai);
   3280		return -EINVAL;
   3281	}
   3282
   3283	unit_pages = ai->unit_size >> PAGE_SHIFT;
   3284
   3285	/* unaligned allocations can't be freed, round up to page size */
   3286	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
   3287			       sizeof(pages[0]));
   3288	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
   3289	if (!pages)
   3290		panic("%s: Failed to allocate %zu bytes\n", __func__,
   3291		      pages_size);
   3292
   3293	/* allocate pages */
   3294	j = 0;
   3295	for (unit = 0; unit < num_possible_cpus(); unit++) {
   3296		unsigned int cpu = ai->groups[0].cpu_map[unit];
   3297		for (i = 0; i < unit_pages; i++) {
   3298			void *ptr;
   3299
   3300			ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn);
   3301			if (!ptr) {
   3302				pr_warn("failed to allocate %s page for cpu%u\n",
   3303						psize_str, cpu);
   3304				goto enomem;
   3305			}
   3306			/* kmemleak tracks the percpu allocations separately */
   3307			kmemleak_free(ptr);
   3308			pages[j++] = virt_to_page(ptr);
   3309		}
   3310	}
   3311
   3312	/* allocate vm area, map the pages and copy static data */
   3313	vm.flags = VM_ALLOC;
   3314	vm.size = num_possible_cpus() * ai->unit_size;
   3315	vm_area_register_early(&vm, PAGE_SIZE);
   3316
   3317	for (unit = 0; unit < num_possible_cpus(); unit++) {
   3318		unsigned long unit_addr =
   3319			(unsigned long)vm.addr + unit * ai->unit_size;
   3320
   3321		for (i = 0; i < unit_pages; i++)
   3322			pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT));
   3323
   3324		/* pte already populated, the following shouldn't fail */
   3325		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
   3326				      unit_pages);
   3327		if (rc < 0)
   3328			panic("failed to map percpu area, err=%d\n", rc);
   3329
   3330		/*
   3331		 * FIXME: Archs with virtual cache should flush local
   3332		 * cache for the linear mapping here - something
   3333		 * equivalent to flush_cache_vmap() on the local cpu.
   3334		 * flush_cache_vmap() can't be used as most supporting
   3335		 * data structures are not set up yet.
   3336		 */
   3337
   3338		/* copy static data */
   3339		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
   3340	}
   3341
   3342	/* we're ready, commit */
   3343	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
   3344		unit_pages, psize_str, ai->static_size,
   3345		ai->reserved_size, ai->dyn_size);
   3346
   3347	pcpu_setup_first_chunk(ai, vm.addr);
   3348	goto out_free_ar;
   3349
   3350enomem:
   3351	while (--j >= 0)
   3352		pcpu_fc_free(page_address(pages[j]), PAGE_SIZE);
   3353	rc = -ENOMEM;
   3354out_free_ar:
   3355	memblock_free(pages, pages_size);
   3356	pcpu_free_alloc_info(ai);
   3357	return rc;
   3358}
   3359#endif /* BUILD_PAGE_FIRST_CHUNK */
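
       /*
        * Sketch (illustrative, not taken from this file): archs that build
        * both helpers commonly try embedding first and fall back to the
        * page-by-page helper, honouring the percpu_alloc= boot parameter
        * via pcpu_chosen_fc.  Roughly:
        *
        *   rc = -EINVAL;
        *   if (pcpu_chosen_fc != PCPU_FC_PAGE)
        *           rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
        *                                       PERCPU_DYNAMIC_RESERVE,
        *                                       PAGE_SIZE, NULL, NULL);
        *   if (rc < 0)
        *           rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, NULL);
        *   if (rc < 0)
        *           panic("cannot initialize percpu area (err=%d)", rc);
        */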
   3360
   3361#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
   3362/*
   3363 * Generic SMP percpu area setup.
   3364 *
   3365 * The embedding helper is used because its behavior closely resembles
   3366 * the original non-dynamic generic percpu area setup.  This is
   3367 * important because many archs have addressing restrictions and might
   3368 * fail if the percpu area is located far away from the previous
   3369 * location.  As an added bonus, in non-NUMA cases, embedding is
   3370 * generally a good idea TLB-wise because the percpu area can piggyback
   3371 * on the physical linear memory mapping, which uses large page
   3372 * mappings on applicable archs.
   3373 */
   3374unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
   3375EXPORT_SYMBOL(__per_cpu_offset);
   3376
   3377void __init setup_per_cpu_areas(void)
   3378{
   3379	unsigned long delta;
   3380	unsigned int cpu;
   3381	int rc;
   3382
   3383	/*
   3384	 * Always reserve area for module percpu variables.  That's
   3385	 * what the legacy allocator did.
   3386	 */
   3387	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE,
   3388				    PAGE_SIZE, NULL, NULL);
   3389	if (rc < 0)
   3390		panic("Failed to initialize percpu areas.");
   3391
   3392	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
   3393	for_each_possible_cpu(cpu)
   3394		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
   3395}
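
       /*
        * Note (illustrative): the offsets computed above are what the percpu
        * accessors add to a static percpu address.  Conceptually, with
        * my_cpu_ptr() as a hypothetical stand-in for the real macros in
        * <asm-generic/percpu.h>:
        *
        *   #define my_cpu_ptr(var, cpu) \
        *           ((typeof(&(var)))((char *)&(var) + __per_cpu_offset[cpu]))
        */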
   3396#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
   3397
   3398#else	/* CONFIG_SMP */
   3399
   3400/*
   3401 * UP percpu area setup.
   3402 *
   3403 * UP always uses the km-based percpu allocator with identity mapping.
   3404 * Static percpu variables are indistinguishable from the usual static
   3405 * variables and don't require any special preparation.
   3406 */
   3407void __init setup_per_cpu_areas(void)
   3408{
   3409	const size_t unit_size =
   3410		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
   3411					 PERCPU_DYNAMIC_RESERVE));
   3412	struct pcpu_alloc_info *ai;
   3413	void *fc;
   3414
   3415	ai = pcpu_alloc_alloc_info(1, 1);
   3416	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
   3417	if (!ai || !fc)
   3418		panic("Failed to allocate memory for percpu areas.");
   3419	/* kmemleak tracks the percpu allocations separately */
   3420	kmemleak_free(fc);
   3421
   3422	ai->dyn_size = unit_size;
   3423	ai->unit_size = unit_size;
   3424	ai->atom_size = unit_size;
   3425	ai->alloc_size = unit_size;
   3426	ai->groups[0].nr_units = 1;
   3427	ai->groups[0].cpu_map[0] = 0;
   3428
   3429	pcpu_setup_first_chunk(ai, fc);
   3430	pcpu_free_alloc_info(ai);
   3431}
   3432
   3433#endif	/* CONFIG_SMP */
   3434
   3435/*
   3436 * pcpu_nr_pages - calculate total number of populated backing pages
   3437 *
   3438 * This reflects the number of pages populated to back chunks.  Metadata is
   3439 * excluded from the number exposed in meminfo because the number of backing
   3440 * pages scales with the number of cpus and can quickly outweigh the memory
   3441 * used for metadata.  It also keeps this calculation nice and simple.
   3442 *
   3443 * RETURNS:
   3444 * Total number of populated backing pages in use by the allocator.
   3445 */
   3446unsigned long pcpu_nr_pages(void)
   3447{
   3448	return pcpu_nr_populated * pcpu_nr_units;
   3449}
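
       /*
        * Example (hypothetical numbers): with pcpu_nr_units == 8 and
        * pcpu_nr_populated == 1024, pcpu_nr_pages() returns 8192 pages,
        * which /proc/meminfo reports as "Percpu:  32768 kB" on a kernel
        * with 4 KiB pages.
        */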
   3450
   3451/*
   3452 * The percpu allocator is initialized early during boot, when neither slab
   3453 * nor workqueues are available.  Plug async management until everything is up
   3454 * and running.
   3455 */
   3456static int __init percpu_enable_async(void)
   3457{
   3458	pcpu_async_enabled = true;
   3459	return 0;
   3460}
   3461subsys_initcall(percpu_enable_async);