page_alloc.c (6823B)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <asm/kvm_hyp.h>
#include <nvhe/gfp.h>

u64 __hyp_vmemmap;

/*
 * Index the hyp_vmemmap to find a potential buddy page, but make no assumption
 * about its current state.
 *
 * Example buddy-tree for a 4-page physically contiguous pool:
 *
 *                 o : Page 3
 *                /
 *               o-o : Page 2
 *              /
 *             /   o : Page 1
 *            /   /
 *           o---o-o : Page 0
 *    Order  2   1 0
 *
 * Example of requests on this pool:
 *   __find_buddy_nocheck(pool, page 0, order 0) => page 1
 *   __find_buddy_nocheck(pool, page 0, order 1) => page 2
 *   __find_buddy_nocheck(pool, page 1, order 0) => page 0
 *   __find_buddy_nocheck(pool, page 2, order 0) => page 3
 */
static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
					     struct hyp_page *p,
					     unsigned short order)
{
	phys_addr_t addr = hyp_page_to_phys(p);

	addr ^= (PAGE_SIZE << order);

	/*
	 * Don't return a page outside the pool range -- it belongs to
	 * something else and may not be mapped in hyp_vmemmap.
	 */
	if (addr < pool->range_start || addr >= pool->range_end)
		return NULL;

	return hyp_phys_to_page(addr);
}

/* Find a buddy page currently available for allocation */
static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
					   struct hyp_page *p,
					   unsigned short order)
{
	struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);

	if (!buddy || buddy->order != order || buddy->refcount)
		return NULL;

	return buddy;
}

/*
 * Pages that are available for allocation are tracked in free-lists, so we use
 * the pages themselves to store the list nodes to avoid wasting space. As the
 * allocator always returns zeroed pages (which are zeroed on the hyp_put_page()
 * path to optimize allocation speed), we also need to clean up the list node in
 * each page when we take it out of the list.
 */
static inline void page_remove_from_list(struct hyp_page *p)
{
	struct list_head *node = hyp_page_to_virt(p);

	__list_del_entry(node);
	memset(node, 0, sizeof(*node));
}

static inline void page_add_to_list(struct hyp_page *p, struct list_head *head)
{
	struct list_head *node = hyp_page_to_virt(p);

	INIT_LIST_HEAD(node);
	list_add_tail(node, head);
}

static inline struct hyp_page *node_to_page(struct list_head *node)
{
	return hyp_virt_to_page(node);
}

static void __hyp_attach_page(struct hyp_pool *pool,
			      struct hyp_page *p)
{
	unsigned short order = p->order;
	struct hyp_page *buddy;

	memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);

	/*
	 * Only the first struct hyp_page of a high-order page (otherwise known
	 * as the 'head') should have p->order set. The non-head pages should
	 * have p->order = HYP_NO_ORDER. Here @p may no longer be the head
	 * after coalescing, so make sure to mark it HYP_NO_ORDER proactively.
	 */
	p->order = HYP_NO_ORDER;
	for (; (order + 1) < pool->max_order; order++) {
		buddy = __find_buddy_avail(pool, p, order);
		if (!buddy)
			break;

		/* Take the buddy out of its list, and coalesce with @p */
		page_remove_from_list(buddy);
		buddy->order = HYP_NO_ORDER;
		p = min(p, buddy);
	}

	/* Mark the new head, and insert it */
	p->order = order;
	page_add_to_list(p, &pool->free_area[order]);
}
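
/*
 * Illustrative sketch of the coalescing above, using the 4-page pool from the
 * diagram at the top of this file: freeing page 1 while page 0 is free at
 * order 0 and page 2 is a free order-1 head first merges pages 0-1 into an
 * order-1 block, then merges that block with pages 2-3, so a single order-2
 * head (page 0) ends up on free_area[2].
 */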

static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
					   struct hyp_page *p,
					   unsigned short order)
{
	struct hyp_page *buddy;

	page_remove_from_list(p);
	while (p->order > order) {
		/*
		 * The buddy of order n - 1 currently has HYP_NO_ORDER as it
		 * is covered by a higher-level page (whose head is @p). Use
		 * __find_buddy_nocheck() to find it and inject it in the
		 * free_area[n - 1], effectively splitting @p in half.
		 */
		p->order--;
		buddy = __find_buddy_nocheck(pool, p, p->order);
		buddy->order = p->order;
		page_add_to_list(buddy, &pool->free_area[buddy->order]);
	}

	return p;
}
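
/*
 * Illustrative sketch of the split above, again using the 4-page pool from
 * the diagram: extracting an order-0 page from the order-2 head at page 0
 * first puts page 2 back on free_area[1] as an order-1 head, then puts
 * page 1 back on free_area[0], and finally returns page 0 at order 0.
 */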

static inline void hyp_page_ref_inc(struct hyp_page *p)
{
	BUG_ON(p->refcount == USHRT_MAX);
	p->refcount++;
}

static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
	BUG_ON(!p->refcount);
	p->refcount--;
	return (p->refcount == 0);
}

static inline void hyp_set_page_refcounted(struct hyp_page *p)
{
	BUG_ON(p->refcount);
	p->refcount = 1;
}

static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
{
	if (hyp_page_ref_dec_and_test(p))
		__hyp_attach_page(pool, p);
}

/*
 * Changes to the buddy tree and page refcounts must be done with the hyp_pool
 * lock held. If a refcount change requires an update to the buddy tree (e.g.
 * hyp_put_page()), both operations must be done within the same critical
 * section to guarantee transient states (e.g. a page with null refcount but
 * not yet attached to a free list) can't be observed by well-behaved readers.
 */
void hyp_put_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	__hyp_put_page(pool, p);
	hyp_spin_unlock(&pool->lock);
}

void hyp_get_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	hyp_page_ref_inc(p);
	hyp_spin_unlock(&pool->lock);
}

void hyp_split_page(struct hyp_page *p)
{
	unsigned short order = p->order;
	unsigned int i;

	p->order = 0;
	for (i = 1; i < (1 << order); i++) {
		struct hyp_page *tail = p + i;

		tail->order = 0;
		hyp_set_page_refcounted(tail);
	}
}

void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
{
	unsigned short i = order;
	struct hyp_page *p;

	hyp_spin_lock(&pool->lock);

	/* Look for a high-enough-order page */
	while (i < pool->max_order && list_empty(&pool->free_area[i]))
		i++;
	if (i >= pool->max_order) {
		hyp_spin_unlock(&pool->lock);
		return NULL;
	}

	/* Extract it from the tree at the right order */
	p = node_to_page(pool->free_area[i].next);
	p = __hyp_extract_page(pool, p, order);

	hyp_set_page_refcounted(p);
	hyp_spin_unlock(&pool->lock);

	return hyp_page_to_virt(p);
}

int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
		  unsigned int reserved_pages)
{
	phys_addr_t phys = hyp_pfn_to_phys(pfn);
	struct hyp_page *p;
	int i;

	hyp_spin_lock_init(&pool->lock);
	pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
	for (i = 0; i < pool->max_order; i++)
		INIT_LIST_HEAD(&pool->free_area[i]);
	pool->range_start = phys;
	pool->range_end = phys + (nr_pages << PAGE_SHIFT);

	/* Init the vmemmap portion */
	p = hyp_phys_to_page(phys);
	for (i = 0; i < nr_pages; i++) {
		p[i].order = 0;
		hyp_set_page_refcounted(&p[i]);
	}

	/* Attach the unused pages to the buddy tree */
	for (i = reserved_pages; i < nr_pages; i++)
		__hyp_put_page(pool, &p[i]);

	return 0;
}
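
/*
 * Typical calling pattern (an illustrative sketch only; the callers live
 * outside this file, and the pfn/nr_pages/nr_reserved values below are
 * placeholders):
 *
 *	struct hyp_pool pool;
 *	void *page;
 *
 *	hyp_pool_init(&pool, pfn, nr_pages, nr_reserved);
 *	page = hyp_alloc_pages(&pool, 0);	// one zeroed PAGE_SIZE page, or NULL
 *	if (page) {
 *		hyp_get_page(&pool, page);	// refcount 1 -> 2
 *		hyp_put_page(&pool, page);	// refcount 2 -> 1
 *		hyp_put_page(&pool, page);	// refcount 1 -> 0: page goes back to the pool
 *	}
 */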