cachepc-linux

Fork of AMDESE/linux with modifications for CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

internal.h (27617B)


/* SPDX-License-Identifier: GPL-2.0-or-later */
/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/tracepoint-defs.h>

struct folio_batch;

/*
 * The set of flags that only affect watermark checking and reclaim
 * behaviour. This is used by the MM to obey the caller constraints
 * about IO, FS and watermark checking while ignoring placement
 * hints such as HIGHMEM usage.
 */
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_RETRY_MAYFAIL|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
			__GFP_ATOMIC|__GFP_NOLOCKDEP)
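
/*
 * Illustrative sketch (not part of the original header): GFP_RECLAIM_MASK
 * is meant to be AND-ed with a caller's gfp mask so that only the
 * IO/FS/watermark bits survive while placement hints are dropped.
 * gfp_with_caller_constraints() is a hypothetical helper shown only to
 * demonstrate the masking pattern.
 */
static inline gfp_t gfp_with_caller_constraints(gfp_t base, gfp_t caller)
{
	/* keep base placement bits, take reclaim behaviour from caller */
	return (base & ~GFP_RECLAIM_MASK) | (caller & GFP_RECLAIM_MASK);
}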

/* The GFP flags allowed during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))

/* Control allocation cpuset and node placement constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)

/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)

/*
 * Different from WARN_ON_ONCE(), no warning will be issued
 * when we specify __GFP_NOWARN.
 */
#define WARN_ON_ONCE_GFP(cond, gfp)	({				\
	static bool __section(".data.once") __warned;			\
	int __ret_warn_once = !!(cond);					\
									\
	if (unlikely(!(gfp & __GFP_NOWARN) && __ret_warn_once && !__warned)) { \
		__warned = true;					\
		WARN_ON(1);						\
	}								\
	unlikely(__ret_warn_once);					\
})
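
/*
 * Illustrative sketch, modelled on the order check at the top of
 * __alloc_pages(): the warning fires at most once, and is suppressed
 * entirely when the caller passed __GFP_NOWARN.
 * gfp_order_valid_sketch() is hypothetical.
 */
static inline bool gfp_order_valid_sketch(unsigned int order, gfp_t gfp)
{
	/* orders >= MAX_ORDER cannot be satisfied by the buddy allocator */
	return !WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp);
}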

void page_writeback_init(void);

static inline void *folio_raw_mapping(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	return (void *)(mapping & ~PAGE_MAPPING_FLAGS);
}

void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
						int nr_throttled);
static inline void acct_reclaim_writeback(struct folio *folio)
{
	pg_data_t *pgdat = folio_pgdat(folio);
	int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);

	if (nr_throttled)
		__acct_reclaim_writeback(pgdat, folio, nr_throttled);
}

static inline void wake_throttle_isolated(pg_data_t *pgdat)
{
	wait_queue_head_t *wqh;

	wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
	if (waitqueue_active(wqh))
		wake_up(wqh);
}

vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);

struct zap_details;
void unmap_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end,
			     struct zap_details *details);

void page_cache_ra_order(struct readahead_control *, struct file_ra_state *,
		unsigned int order);
void force_page_cache_ra(struct readahead_control *, unsigned long nr);
static inline void force_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
	force_page_cache_ra(&ractl, nr_to_read);
}

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);
bool truncate_inode_partial_folio(struct folio *folio, loff_t start,
		loff_t end);
long invalidate_inode_page(struct page *page);
unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
		pgoff_t start, pgoff_t end, unsigned long *nr_pagevec);

/**
 * folio_evictable - Test whether a folio is evictable.
 * @folio: The folio to test.
 *
 * Test whether @folio is evictable -- i.e., should be placed on
 * active/inactive lists vs unevictable list.
 *
 * Reasons folio might not be evictable:
 * 1. folio's mapping marked unevictable
 * 2. One of the pages in the folio is part of an mlocked VMA
 */
static inline bool folio_evictable(struct folio *folio)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(folio_mapping(folio)) &&
			!folio_test_mlocked(folio);
	rcu_read_unlock();
	return ret;
}

static inline bool page_evictable(struct page *page)
{
	bool ret;

	/* Prevent address_space of inode and swap cache from being freed */
	rcu_read_lock();
	ret = !mapping_unevictable(page_mapping(page)) && !PageMlocked(page);
	rcu_read_unlock();
	return ret;
}

/*
 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_ref_count(page), page);
	set_page_count(page, 1);
}

extern unsigned long highest_memmap_pfn;

/*
 * Maximum number of reclaim retries without progress before the OOM
 * killer is considered the only way forward.
 */
#define MAX_RECLAIM_RETRIES 16

/*
 * in mm/early_ioremap.c
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					unsigned long size, pgprot_t prot);

/*
 * in mm/vmscan.c:
 */
int isolate_lru_page(struct page *page);
int folio_isolate_lru(struct folio *folio);
void putback_lru_page(struct page *page);
void folio_putback_lru(struct folio *folio);
extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);

/*
 * in mm/rmap.c:
 */
extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

/*
 * in mm/page_alloc.c
 */

/*
 * Structure for holding the mostly immutable allocation parameters passed
 * between functions involved in allocations, including the alloc_pages*
 * family of functions.
 *
 * nodemask, migratetype and highest_zoneidx are initialized only once in
 * __alloc_pages() and then never change.
 *
 * zonelist, preferred_zone and highest_zoneidx are set first in
 * __alloc_pages() for the fast path, and might be later changed
 * in __alloc_pages_slowpath(). All other functions pass the whole structure
 * by a const pointer.
 */
struct alloc_context {
	struct zonelist *zonelist;
	nodemask_t *nodemask;
	struct zoneref *preferred_zoneref;
	int migratetype;

	/*
	 * highest_zoneidx represents the highest usable zone index of
	 * the allocation request. Due to the nature of the zones,
	 * memory in a zone lower than highest_zoneidx will be
	 * protected by lowmem_reserve[highest_zoneidx].
	 *
	 * highest_zoneidx is also used by reclaim/compaction to limit
	 * the target zone, since zones higher than this index cannot
	 * be used for this allocation request.
	 */
	enum zone_type highest_zoneidx;
	bool spread_dirty_pages;
};

/*
 * This function returns the order of a free page in the buddy system. In
 * general, page_zone(page)->lock must be held by the caller to prevent the
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 *
 * READ_ONCE is used so that if the caller assigns the result into a local
 * variable and e.g. tests it for valid range before using, the compiler cannot
 * decide to remove the variable and inline the page_private(page) multiple
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 */
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
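
/*
 * Illustrative sketch of the intended buddy_order_unsafe() pattern (cf.
 * the compaction scanner): read the order once into a local variable,
 * range-check it, and only then use it. buddy_order_in_range_sketch()
 * is hypothetical.
 */
static inline bool buddy_order_in_range_sketch(struct page *page)
{
	unsigned int order = buddy_order_unsafe(page);

	/* the value may be garbage if the page was allocated concurrently */
	return order < MAX_ORDER;
}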

/*
 * This function checks whether a page is free and is the buddy of
 * another page, i.e. whether we can coalesce a page and its buddy:
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
				 unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_pfn(unsigned long page_pfn, unsigned int order)
{
	return page_pfn ^ (1 << order);
}
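
/*
 * Illustrative sketch: equation (2) above as code, to mirror
 * __find_buddy_pfn(). For pfn 8 at order 1 the buddy is 8 ^ 2 = 10 and
 * the order-2 parent is 8 & ~2 = 8. __find_parent_pfn() is hypothetical
 * and exists only to illustrate the identity.
 */
static inline unsigned long
__find_parent_pfn(unsigned long page_pfn, unsigned int order)
{
	/* P = B & ~(1 << O): clear the bit that distinguishes the buddies */
	return page_pfn & ~(1UL << order);
}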

/*
 * Find the buddy of @page and validate it.
 * @page: The input page
 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
 *       function is used in the performance-critical __free_one_page().
 * @order: The order of the page
 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
 *             page_to_pfn().
 *
 * The candidate buddy can be a non-PageBuddy page, lie outside @page's
 * zone, or have an order different from @page's, so it is validated with
 * page_is_buddy() before being returned.
 *
 * Return: the found buddy page or NULL if not found.
 */
static inline struct page *find_buddy_page_pfn(struct page *page,
			unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
{
	unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
	struct page *buddy;

	buddy = page + (__buddy_pfn - pfn);
	if (buddy_pfn)
		*buddy_pfn = __buddy_pfn;

	if (page_is_buddy(page, buddy, order))
		return buddy;
	return NULL;
}
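
/*
 * Illustrative sketch of the caller-side pattern (modelled on, but not
 * copied from, the free path): a page may be merged with its buddy only
 * if the lookup above survived page_is_buddy() validation.
 * can_merge_sketch() is hypothetical.
 */
static inline bool can_merge_sketch(struct page *page, unsigned long pfn,
				    unsigned int order)
{
	unsigned long buddy_pfn;

	/* NULL means the candidate buddy failed validation */
	return find_buddy_page_pfn(page, pfn, order, &buddy_pfn) != NULL;
}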

extern struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone);

static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	if (zone->contiguous)
		return pfn_to_page(start_pfn);

	return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
}

extern int __isolate_free_page(struct page *page, unsigned int order);
extern void __putback_isolated_page(struct page *page, unsigned int order,
				    int mt);
extern void memblock_free_pages(struct page *page, unsigned long pfn,
					unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned int order);
extern void post_alloc_hook(struct page *page, unsigned int order,
					gfp_t gfp_flags);
extern int user_min_free_kbytes;

extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);

extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);

extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
			  phys_addr_t min_addr,
			  int nid, bool exact_nid);

int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset);

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
 * in mm/compaction.c
 */
/*
 * compact_control is used to track pages being migrated and the free pages
 * they are being migrated to during memory compaction. The free_pfn starts
 * at the end of a zone and migrate_pfn begins at the start. Movable pages
 * are moved to the end of a zone during a compaction run and the run
 * completes when free_pfn <= migrate_pfn
 */
struct compact_control {
	struct list_head freepages;	/* List of free pages to migrate to */
	struct list_head migratepages;	/* List of pages being migrated */
	unsigned int nr_freepages;	/* Number of isolated free pages */
	unsigned int nr_migratepages;	/* Number of pages to migrate */
	unsigned long free_pfn;		/* isolate_freepages search base */
	/*
	 * Acts as an in/out parameter to page isolation for migration.
	 * isolate_migratepages uses it as a search base.
	 * isolate_migratepages_block will update the value to the next pfn
	 * after the last isolated one.
	 */
	unsigned long migrate_pfn;
	unsigned long fast_start_pfn;	/* a pfn to start linear scan from */
	struct zone *zone;
	unsigned long total_migrate_scanned;
	unsigned long total_free_scanned;
	unsigned short fast_search_fail;/* failures to use free list searches */
	short search_order;		/* order to start a fast search at */
	const gfp_t gfp_mask;		/* gfp mask of a direct compactor */
	int order;			/* order a direct compactor needs */
	int migratetype;		/* migratetype of direct compactor */
	const unsigned int alloc_flags;	/* alloc flags of a direct compactor */
	const int highest_zoneidx;	/* zone index of a direct compactor */
	enum migrate_mode mode;		/* Async or sync migration mode */
	bool ignore_skip_hint;		/* Scan blocks even if marked skip */
	bool no_set_skip_hint;		/* Don't mark blocks for skipping */
	bool ignore_block_suitable;	/* Scan blocks considered unsuitable */
	bool direct_compaction;		/* False from kcompactd or /proc/... */
	bool proactive_compaction;	/* kcompactd proactive compaction */
	bool whole_zone;		/* Whole zone should/has been scanned */
	bool contended;			/* Signal lock contention */
	bool rescan;			/* Rescanning the same pageblock */
	bool alloc_contig;		/* alloc_contig_range allocation */
};
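
/*
 * Illustrative sketch of the scanner-convergence rule described above:
 * the free scanner walks down from the zone end while the migration
 * scanner walks up from the start, and the run is over once they meet.
 * compact_scanners_met_sketch() is hypothetical; the real check lives
 * in mm/compaction.c.
 */
static inline bool compact_scanners_met_sketch(struct compact_control *cc)
{
	/* the run completes when free_pfn <= migrate_pfn */
	return cc->free_pfn <= cc->migrate_pfn;
}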

/*
 * Used in direct compaction when a page should be taken from the freelists
 * immediately when one is created during the free path.
 */
struct capture_control {
	struct compact_control *cc;
	struct page *page;
};

unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn);
int
isolate_migratepages_range(struct compact_control *cc,
			   unsigned long low_pfn, unsigned long end_pfn);

int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end);
#endif
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal);

/*
 * These three helpers classify VMAs for virtual memory accounting.
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area - automatically grows in one direction
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return (flags & VM_STACK) == VM_STACK;
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
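
/*
 * Illustrative sketch of how the three classifiers above feed the per-mm
 * counters. This is a simplified restatement of what vm_stat_account()
 * in mm/mmap.c does, not its actual body; vm_acct_sketch() is
 * hypothetical.
 */
static inline void vm_acct_sketch(struct mm_struct *mm, vm_flags_t flags,
				  long npages)
{
	if (is_exec_mapping(flags))
		mm->exec_vm += npages;		/* executable text */
	else if (is_stack_mapping(flags))
		mm->stack_vm += npages;		/* auto-growing stack */
	else if (is_data_mapping(flags))
		mm->data_vm += npages;		/* private writable data */
}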

/* mm/util.c */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev);
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
struct anon_vma *folio_anon_vma(struct folio *folio);

#ifdef CONFIG_MMU
void unmap_mapping_folio(struct folio *folio);
extern long populate_vma_page_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *locked);
extern long faultin_vma_page_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   bool write, int *locked);
extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
			      unsigned long len);
/*
 * mlock_vma_page() and munlock_vma_page():
 * should be called with vma's mmap_lock held for read or write,
 * under page table lock for the pte/pmd being added or removed.
 *
 * mlock is usually called at the end of page_add_*_rmap(),
 * munlock at the end of page_remove_rmap(); but new anon
 * pages are managed by lru_cache_add_inactive_or_unevictable()
 * calling mlock_new_page().
 *
 * @compound is used to include pmd mappings of THPs, but filter out
 * pte mappings of THPs, which cannot be consistently counted: a pte
 * mapping of the THP head cannot be distinguished by the page alone.
 */
void mlock_folio(struct folio *folio);
static inline void mlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
{
	/*
	 * The VM_SPECIAL check here serves two purposes.
	 * 1) VM_IO check prevents migration from double-counting during mlock.
	 * 2) Although mmap_region() and mlock_fixup() take care that VM_LOCKED
	 *    is never left set on a VM_SPECIAL vma, there is an interval while
	 *    file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may
	 *    still be set while VM_SPECIAL bits are added: so ignore it then.
	 */
	if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) &&
	    (compound || !folio_test_large(folio)))
		mlock_folio(folio);
}

static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	mlock_vma_folio(page_folio(page), vma, compound);
}

void munlock_page(struct page *page);
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
{
	if (unlikely(vma->vm_flags & VM_LOCKED) &&
	    (compound || !PageTransCompound(page)))
		munlock_page(page);
}
void mlock_new_page(struct page *page);
bool need_mlock_page_drain(int cpu);
void mlock_page_drain_local(void);
void mlock_page_drain_remote(int cpu);

extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/*
 * Return the start of the user virtual address corresponding to the
 * given page offset within a vma.
 */
static inline unsigned long
vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
		  struct vm_area_struct *vma)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* Check for address beyond vma (or wrapped through 0?) */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* Test above avoids possibility of wrap to 0 on 32-bit */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}
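
/*
 * Worked example (illustrative, values made up): with
 * vm_start = 0x7f0000000000, vm_pgoff = 0x10 and pgoff = 0x12, the first
 * branch above yields 0x7f0000000000 + ((0x12 - 0x10) << 12) =
 * 0x7f0000002000 with 4K pages, assuming that address is below vm_end.
 */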

/*
 * Return the start of the user virtual address of a page within a vma.
 * Returns -EFAULT if all of the page is outside the range of vma.
 * If page is a compound head, the entire compound page is considered.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageKsm(page), page);	/* KSM page->index unusable */
	return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
}

/*
 * Then at what user virtual address will none of the range be found in vma?
 * Assumes that vma_address() already returned a good starting address.
 */
static inline unsigned long vma_address_end(struct page_vma_mapped_walk *pvmw)
{
	struct vm_area_struct *vma = pvmw->vma;
	pgoff_t pgoff;
	unsigned long address;

	/* Common case, plus ->pgoff is invalid for KSM */
	if (pvmw->nr_pages == 1)
		return pvmw->address + PAGE_SIZE;

	pgoff = pvmw->pgoff + pvmw->nr_pages;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	/* Check for address beyond vma (or wrapped through 0?) */
	if (address < vma->vm_start || address > vma->vm_end)
		address = vma->vm_end;
	return address;
}

static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
						    struct file *fpin)
{
	int flags = vmf->flags;

	if (fpin)
		return fpin;

	/*
	 * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
	 * anything, so we only pin the file and drop the mmap_lock if
	 * FAULT_FLAG_ALLOW_RETRY is set, this is the first attempt, and
	 * FAULT_FLAG_RETRY_NOWAIT is not set.
	 */
	if (fault_flag_allow_retry_first(flags) &&
	    !(flags & FAULT_FLAG_RETRY_NOWAIT)) {
		fpin = get_file(vmf->vma->vm_file);
		mmap_read_unlock(vmf->vma->vm_mm);
	}
	return fpin;
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
static inline void mlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound) { }
static inline void mlock_new_page(struct page *page) { }
static inline bool need_mlock_page_drain(int cpu) { return false; }
static inline void mlock_page_drain_local(void) { }
static inline void mlock_page_drain_remote(int cpu) { }
static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
{
}
#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return nth_page(base, offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
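
/*
 * Illustrative sketch of the iteration pattern these two helpers exist
 * for (modelled on the gigantic-page clear/copy loops in mm/memory.c).
 * clear_gigantic_sketch() is hypothetical and assumes clear_highpage()
 * from <linux/highmem.h>.
 */
static inline void clear_gigantic_sketch(struct page *base, int nr_pages)
{
	int i;
	struct page *p = base;

	/* mem_map_next() re-derives p across MAX_ORDER_NR_PAGES seams */
	for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i))
		clear_highpage(p);
}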

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		if (level <= MMINIT_WARNING) \
			pr_warn("mminit::" prefix " " fmt, ##arg);	\
		else \
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
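
/*
 * Illustrative usage (values made up): a MMINIT_VERIFY message prints at
 * KERN_DEBUG once mminit_loglevel exceeds the message level, while
 * MMINIT_WARNING messages are promoted to pr_warn(), e.g.:
 *
 *	mminit_dprintk(MMINIT_VERIFY, "zonelist",
 *		       "node %d checked\n", nid);
 */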

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_zonelist(void);
#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

#define NODE_RECLAIM_NOSCAN	-2
#define NODE_RECLAIM_FULL	-1
#define NODE_RECLAIM_SOME	0
#define NODE_RECLAIM_SUCCESS	1

#ifdef CONFIG_NUMA
extern int node_reclaim(struct pglist_data *, gfp_t, unsigned int);
extern int find_next_best_node(int node, nodemask_t *used_node_mask);
#else
static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
				unsigned int order)
{
	return NODE_RECLAIM_NOSCAN;
}
static inline int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	return NUMA_NO_NODE;
}
#endif

/*
 * mm/memory-failure.c
 */
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

#ifdef CONFIG_MEMORY_FAILURE
void clear_hwpoisoned_pages(struct page *memmap, int nr_pages);
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

extern unsigned long  __must_check vm_mmap_pgoff(struct file *, unsigned long,
        unsigned long, unsigned long,
        unsigned long, unsigned long);

extern void set_pageblock_order(void);
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list);
/* The ALLOC_WMARK bits are used as an index to zone->watermark */
#define ALLOC_WMARK_MIN		WMARK_MIN
#define ALLOC_WMARK_LOW		WMARK_LOW
#define ALLOC_WMARK_HIGH	WMARK_HIGH
#define ALLOC_NO_WATERMARKS	0x04 /* don't check watermarks at all */

/* Mask to get the watermark bits */
#define ALLOC_WMARK_MASK	(ALLOC_NO_WATERMARKS-1)
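
/*
 * Illustrative sketch: the low ALLOC_WMARK bits index the zone
 * watermarks, so the watermark to check against is selected by masking,
 * as in get_page_from_freelist(). alloc_wmark_sketch() is hypothetical;
 * wmark_pages() is the accessor from <linux/mmzone.h>.
 */
static inline unsigned long alloc_wmark_sketch(struct zone *z,
					       unsigned int alloc_flags)
{
	/* ALLOC_WMARK_MIN/LOW/HIGH select which watermark applies */
	return wmark_pages(z, alloc_flags & ALLOC_WMARK_MASK);
}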

/*
 * Only MMU archs have async oom victim reclaim (the oom_reaper), so we
 * cannot assume that a reduced access to memory reserves is sufficient
 * for !MMU.
 */
#ifdef CONFIG_MMU
#define ALLOC_OOM		0x08
#else
#define ALLOC_OOM		ALLOC_NO_WATERMARKS
#endif

#define ALLOC_HARDER		 0x10 /* try to alloc harder */
#define ALLOC_HIGH		 0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		 0x40 /* check for correct cpuset */
#define ALLOC_CMA		 0x80 /* allow allocations from CMA areas */
#ifdef CONFIG_ZONE_DMA32
#define ALLOC_NOFRAGMENT	0x100 /* avoid mixing pageblock types */
#else
#define ALLOC_NOFRAGMENT	  0x0
#endif
#define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */

enum ttu_flags;
struct tlbflush_unmap_batch;


/*
 * only for MM internal work items which do not depend on
 * any allocations or locks which might depend on allocations
 */
extern struct workqueue_struct *mm_percpu_wq;

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
}
static inline void try_to_unmap_flush_dirty(void)
{
}
static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
extern const struct trace_print_flags vmaflag_names[];
extern const struct trace_print_flags gfpflag_names[];

static inline bool is_migrate_highatomic(enum migratetype migratetype)
{
	return migratetype == MIGRATE_HIGHATOMIC;
}

static inline bool is_migrate_highatomic_page(struct page *page)
{
	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
}

void setup_zone_pageset(struct zone *zone);

struct migration_target_control {
	int nid;		/* preferred node id */
	nodemask_t *nmask;
	gfp_t gfp_mask;
};

/*
 * mm/vmalloc.c
 */
#ifdef CONFIG_MMU
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift);
#else
static inline
int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
                pgprot_t prot, struct page **pages, unsigned int page_shift)
{
	return -EINVAL;
}
#endif

void vunmap_range_noflush(unsigned long start, unsigned long end);

int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
		      unsigned long addr, int page_nid, int *flags);

void free_zone_device_page(struct page *page);

/*
 * mm/gup.c
 */
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);

DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

#endif	/* __MM_INTERNAL_H */