memremap.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_

#include <linux/mm.h>
#include <linux/range.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @end_pfn: first page frame number past the end of the mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base_pfn)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
	unsigned long base_pfn;
	const unsigned long end_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};

/*
 * Specialize ZONE_DEVICE memory into multiple types, each of which has a
 * different usage.
 *
 * MEMORY_DEVICE_PRIVATE:
 * Device memory that is not directly addressable by the CPU: the CPU can
 * neither read nor write private memory. In this case, we do still have
 * struct pages backing the device memory. Doing so simplifies the
 * implementation, but it is important to remember that there are certain
 * points at which the struct page must be treated as an opaque object,
 * rather than a "normal" struct page.
 *
 * A more complete discussion of unaddressable memory may be found in
 * include/linux/hmm.h and Documentation/vm/hmm.rst.
 *
 * MEMORY_DEVICE_FS_DAX:
 * Host memory that has similar access semantics to System RAM, i.e. DMA
 * coherent and supports page pinning. To coordinate page pinning with other
 * operations, MEMORY_DEVICE_FS_DAX arranges for a wakeup event whenever a
 * page is unpinned and becomes idle. This wakeup is used to coordinate
 * physical address space management (e.g. fs truncate/hole punch) with
 * pinned pages (e.g. device DMA).
 *
 * MEMORY_DEVICE_GENERIC:
 * Host memory that has similar access semantics to System RAM, i.e. DMA
 * coherent and supports page pinning. This is for example used by DAX
 * devices that expose memory using a character device.
 *
 * MEMORY_DEVICE_PCI_P2PDMA:
 * Device memory residing in a PCI BAR intended for use with peer-to-peer
 * transactions.
 */
enum memory_type {
	/* 0 is reserved to catch uninitialized type fields */
	MEMORY_DEVICE_PRIVATE = 1,
	MEMORY_DEVICE_FS_DAX,
	MEMORY_DEVICE_GENERIC,
	MEMORY_DEVICE_PCI_P2PDMA,
};

struct dev_pagemap_ops {
	/*
	 * Called once the page refcount reaches 0. The reference count will be
	 * reset to one by the core code after the method is called to prepare
	 * for handing out the page again.
	 */
	void (*page_free)(struct page *page);

	/*
	 * Used for private (un-addressable) device memory only. Must migrate
	 * the page back to a CPU accessible page.
	 */
	vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
};

#define PGMAP_ALTMAP_VALID	(1 << 0)
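/*
 * Example (illustrative sketch, not part of the upstream header): a
 * driver exposing MEMORY_DEVICE_PRIVATE memory would supply both
 * callbacks. mydev_page_free() and mydev_migrate_to_ram() are
 * hypothetical driver functions.
 *
 *	static const struct dev_pagemap_ops mydev_pagemap_ops = {
 *		.page_free	= mydev_page_free,
 *		.migrate_to_ram	= mydev_migrate_to_ram,
 *	};
 */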
/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @done: completion for @ref
 * @type: memory type: see MEMORY_DEVICE_* above
 * @flags: PGMAP_* flags to specify detailed behavior
 * @vmemmap_shift: structural definition of how the vmemmap page metadata
 *	is populated, specifically the metadata page order.
 *	A zero value (default) uses base pages as the vmemmap metadata
 *	representation. A bigger value will set up compound struct pages
 *	of the requested order value.
 * @ops: method table
 * @owner: an opaque pointer identifying the entity that manages this
 *	instance. Used by various helpers to make sure that no
 *	foreign ZONE_DEVICE memory is accessed.
 * @nr_range: number of ranges to be mapped
 * @range: range to be mapped when nr_range == 1
 * @ranges: array of ranges to be mapped when nr_range > 1
 */
struct dev_pagemap {
	struct vmem_altmap altmap;
	struct percpu_ref ref;
	struct completion done;
	enum memory_type type;
	unsigned int flags;
	unsigned long vmemmap_shift;
	const struct dev_pagemap_ops *ops;
	void *owner;
	int nr_range;
	union {
		struct range range;
		struct range ranges[0];
	};
};

static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
	if (pgmap->flags & PGMAP_ALTMAP_VALID)
		return &pgmap->altmap;
	return NULL;
}

static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
{
	return 1 << pgmap->vmemmap_shift;
}

static inline bool is_device_private_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool folio_is_device_private(const struct folio *folio)
{
	return is_device_private_page(&folio->page);
}

static inline bool is_pci_p2pdma_page(const struct page *page)
{
	return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
		is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}

#ifdef CONFIG_ZONE_DEVICE
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
unsigned long memremap_compat_align(void);
#else
static inline void *devm_memremap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled; this requires callers to fall
	 * back to plain devm_memremap() based on config.
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}

static inline void devm_memunmap_pages(struct device *dev,
		struct dev_pagemap *pgmap)
{
}

static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	return NULL;
}

static inline bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{
	return false;
}

static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	return 0;
}

static inline void vmem_altmap_free(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
}

/* when memremap_pages() is disabled all archs can remap a single page */
static inline unsigned long memremap_compat_align(void)
{
	return PAGE_SIZE;
}
#endif /* CONFIG_ZONE_DEVICE */
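/*
 * Example (illustrative sketch, not part of the upstream header):
 * minimal single-range setup for devm_memremap_pages(). Here "dev",
 * "res", and "pgmap" are assumed to come from the caller's probe
 * path; error handling is abbreviated.
 *
 *	pgmap->type = MEMORY_DEVICE_GENERIC;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	addr = devm_memremap_pages(dev, pgmap);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */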
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(&pgmap->ref);
}

#endif /* _LINUX_MEMREMAP_H_ */
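/*
 * Example (illustrative sketch, not part of the upstream header):
 * pairing get_dev_pagemap() with put_dev_pagemap(). On success,
 * get_dev_pagemap() returns the pgmap with a reference held, which
 * the caller drops when done.
 *
 *	pgmap = get_dev_pagemap(pfn, NULL);
 *	if (pgmap) {
 *		... operate on the ZONE_DEVICE pfn ...
 *		put_dev_pagemap(pgmap);
 *	}
 */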