cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

ttm_pool.c (18787B)


// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because, on x86, changing the
 * caching attributes of the linear mapping requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition, allocations from the DMA coherent API are pooled as well,
 * because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping, with the allocation
 *	order stored in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};
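
/*
 * Illustration of the @vaddr packing (editorial sketch): dma_alloc_attrs()
 * returns page aligned addresses, so the low PAGE_SHIFT bits of the vaddr
 * are zero and can carry the allocation order, as done below:
 *
 *	dma->vaddr = (unsigned long)vaddr | order;	// ttm_pool_alloc_page()
 *	vaddr = (void *)(dma->vaddr & PAGE_MASK);	// ttm_pool_free_page()
 *	order = dma->vaddr & ~PAGE_MASK;		// ttm_pool_page_order()
 */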

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching of pages of size 1 << order and free them */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

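/*
 * For example (editorial sketch, hypothetical addresses, 4 KiB pages):
 * mapping an order-2 allocation whose DMA address is 0x10000000 stores four
 * consecutive entries 0x10000000, 0x10001000, 0x10002000 and 0x10003000 in
 * the caller's dma_address array, one per PAGE_SIZE page of the allocation.
 */
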
/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages back to a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct page *p;

	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}

	return num_pages;
}

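/*
 * Example shrink round (editorial sketch): with pool types A, B and C on the
 * shrinker list, one call rotates A to the tail (leaving B, C, A) and frees
 * at most one batch of 1 << A->order pages from A, so repeated calls cycle
 * fairly through all registered pool types.
 */
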
/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	unsigned long num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	gfp_t gfp_flags = GFP_USER;
	unsigned int i, order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		bool apply_caching = false;
		struct ttm_pool_type *pt;

		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			apply_caching = true;
		} else {
			p = ttm_pool_alloc_page(pool, gfp_flags, order);
			if (p && PageHighMem(p))
				apply_caching = true;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}

		if (apply_caching) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;
			caching = pages + (1 << order);
		}

		if (dma_addr) {
			r = ttm_pool_map(pool, order, p, &dma_addr);
			if (r)
				goto error_free_page;
		}

		num_pages -= 1 << order;
		for (i = 1 << order; i; --i)
			*(pages++) = p++;
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, tt->caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);

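/*
 * A worked example of the order selection above (editorial, hypothetical
 * request, assuming MAX_ORDER - 1 >= 8): for num_pages = 300 the loop takes
 * order 8 (256 pages), then order min(8, __fls(44)) = 5 (32 pages), order 3
 * (8 pages) and order 2 (4 pages); 256 + 32 + 8 + 4 = 300. When an
 * allocation fails, --order retries with the next smaller order instead.
 */
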
/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	unsigned int i;

	for (i = 0; i < tt->num_pages; ) {
		struct page *p = tt->pages[i];
		unsigned int order, num_pages;
		struct ttm_pool_type *pt;

		order = ttm_pool_page_order(pool, p);
		num_pages = 1ULL << order;
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);

		pt = ttm_pool_select_type(pool, tt->caching, order);
		if (pt)
			ttm_pool_type_give(pt, tt->pages[i]);
		else
			ttm_pool_free_page(pool, tt->caching, order,
					   tt->pages[i]);

		i += num_pages;
	}

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	if (use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
}
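
/*
 * A minimal lifecycle sketch (editorial; dev, tt and ctx are assumed to come
 * from the driver):
 *
 *	struct ttm_pool pool;
 *
 *	ttm_pool_init(&pool, dev, true, false);	// coherent DMA, no DMA32
 *	r = ttm_pool_alloc(&pool, tt, &ctx);	// populate a ttm_tt
 *	...
 *	ttm_pool_free(&pool, tt);		// pages go back to the pool
 *	ttm_pool_fini(&pool);			// free all cached pages
 */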

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	if (pool->use_dma_alloc) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j < MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	synchronize_shrinkers();
}

/* As long as pages are available, make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker);
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}