// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */
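
/*
 * Rough usage sketch (illustrative only, the names are placeholders): a
 * driver embeds a struct ttm_pool, initializes it once and then allocates
 * and frees the backing pages of its ttm_tt objects through it:
 *
 *	ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);
 *	...
 *	r = ttm_pool_alloc(&bdev->pool, tt, ctx);	(populate backing pages)
 *	...
 *	ttm_pool_free(&bdev->pool, tt);			(unpopulate again)
 *	...
 *	ttm_pool_fini(&bdev->pool);			(drop all cached pages)
 */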

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_bo.h>

#include "ttm_module.h"

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping and order in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};
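
/*
 * The coherent DMA allocation is at least PAGE_SIZE aligned, so the low bits
 * of @vaddr are free and the allocation order is stashed there. This mirrors
 * what the helpers below do:
 *
 *	dma->vaddr = (unsigned long)vaddr | order;	(pack)
 *	order = dma->vaddr & ~PAGE_MASK;		(unpack the order)
 *	vaddr = (void *)(dma->vaddr & PAGE_MASK);	(unpack the pointer)
 */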

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);
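
/*
 * Assuming this file is built into the usual ttm module, the limit can also
 * be set on the kernel command line, e.g. ttm.page_pool_size=262144 (in
 * pages). When left at 0 the default passed to ttm_pool_mgr_init() is used.
 */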

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_uncached[MAX_ORDER + 1];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	/* Don't set the __GFP_COMP flag for higher order allocations.
	 * Mapping pages directly into a userspace process and calling
	 * put_page() on a TTM allocated page is illegal.
	 */
	if (order)
		gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
			__GFP_KSWAPD_RECLAIM;

	if (!pool->use_dma_alloc) {
		p = alloc_pages_node(pool->nid, gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and free pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool || !pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, addr))
			return -EFAULT;
	}

	for (i = 1 << order; i ; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}
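
/*
 * A worked example for ttm_pool_map() above: with order = 2 it writes four
 * consecutive entries through the caller's cursor, dma_addr[0] = addr,
 * dma_addr[1] = addr + PAGE_SIZE and so on, and leaves *dma_addr pointing
 * just past them, ready for the next chunk of pages.
 */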

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	unsigned int i, num_pages = 1 << pt->order;

	for (i = 0; i < num_pages; ++i) {
		if (PageHighMem(p))
			clear_highpage(p + i);
		else
			clear_page(page_address(p + i));
	}

	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	while ((p = ttm_pool_type_take(pt)))
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		if (pool->use_dma32)
			return &global_dma32_write_combined[order];

		return &global_write_combined[order];
	case ttm_uncached:
		if (pool->use_dma32)
			return &global_dma32_uncached[order];

		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_pages;
	struct page *p;

	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_pages = 1 << pt->order;
	} else {
		num_pages = 0;
	}

	return num_pages;
}

/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/* Called when we got a page, either from a pool or newly allocated */
static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
				   struct page *p, dma_addr_t **dma_addr,
				   unsigned long *num_pages,
				   struct page ***pages)
{
	unsigned int i;
	int r;

	if (*dma_addr) {
		r = ttm_pool_map(pool, order, p, dma_addr);
		if (r)
			return r;
	}

	*num_pages -= 1 << order;
	for (i = 1 << order; i; --i, ++(*pages), ++p)
		**pages = p;

	return 0;
}

/**
 * ttm_pool_free_range() - Free a range of TTM pages
 * @pool: The pool used for allocating.
 * @tt: The struct ttm_tt holding the page pointers.
 * @caching: The page caching mode used by the range.
 * @start_page: index for first page to free.
 * @end_page: index for last page to free + 1.
 *
 * During allocation the ttm_tt page-vector may be populated with ranges of
 * pages with different attributes if allocation hit an error without being
 * able to completely fulfill the allocation. This function can be used
 * to free these individual ranges.
 */
static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
				enum ttm_caching caching,
				pgoff_t start_page, pgoff_t end_page)
{
	struct page **pages = &tt->pages[start_page];
	unsigned int order;
	pgoff_t i, nr;

	for (i = start_page; i < end_page; i += nr, pages += nr) {
		struct ttm_pool_type *pt = NULL;

		order = ttm_pool_page_order(pool, *pages);
		nr = (1UL << order);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], nr);

		pt = ttm_pool_select_type(pool, caching, order);
		if (pt)
			ttm_pool_type_give(pt, *pages);
		else
			ttm_pool_free_page(pool, caching, order, *pages);
	}
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	pgoff_t num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	enum ttm_caching page_caching;
	gfp_t gfp_flags = GFP_USER;
	pgoff_t caching_divide;
	unsigned int order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
	     num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		struct ttm_pool_type *pt;

		page_caching = tt->caching;
		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;

			caching = pages;
			do {
				r = ttm_pool_page_allocated(pool, order, p,
							    &dma_addr,
							    &num_pages,
							    &pages);
				if (r)
					goto error_free_page;

				caching = pages;
				if (num_pages < (1 << order))
					break;

				p = ttm_pool_type_take(pt);
			} while (p);
		}

		page_caching = ttm_cached;
		while (num_pages >= (1 << order) &&
		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {

			if (PageHighMem(p)) {
				r = ttm_pool_apply_caching(caching, pages,
							   tt->caching);
				if (r)
					goto error_free_page;
				caching = pages;
			}
			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
						    &num_pages, &pages);
			if (r)
				goto error_free_page;
			if (PageHighMem(p))
				caching = pages;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_free_page:
	ttm_pool_free_page(pool, page_caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	caching_divide = caching - tt->pages;
	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);
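
/*
 * A minimal caller sketch (illustrative only, &bdev->pool is a placeholder):
 * a driver that would rather have the allocation fail than trigger the OOM
 * killer sets gfp_retry_mayfail in its operation context before populating:
 *
 *	struct ttm_operation_ctx ctx = { .gfp_retry_mayfail = true };
 *
 *	r = ttm_pool_alloc(&bdev->pool, tt, &ctx);
 *	if (r)
 *		return r;
 */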

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @nid: NUMA node to use for allocations
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   int nid, bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->nid = nid;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	if (use_dma_alloc || nid != NUMA_NO_NODE) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j <= MAX_ORDER; ++j)
				ttm_pool_type_init(&pool->caching[i].orders[j],
						   pool, i, j);
	}
}
EXPORT_SYMBOL(ttm_pool_init);
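
/*
 * For example (illustrative only), a device that doesn't need coherent DMA
 * allocations but wants its pages to come from its local NUMA node could use:
 *
 *	ttm_pool_init(&pool, dev, dev_to_node(dev), false, false);
 *
 * Passing NUMA_NO_NODE with use_dma_alloc = false instead lets
 * ttm_pool_select_type() fall back to the global write combined/uncached
 * pools above (on x86).
 */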

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
			for (j = 0; j <= MAX_ORDER; ++j)
				ttm_pool_type_fini(&pool->caching[i].orders[j]);
	}

	/* We removed the pool types from the LRU, but we need to also make sure
	 * that no shrinker is concurrently freeing pages from the pool.
	 */
	synchronize_shrinkers();
}
EXPORT_SYMBOL(ttm_pool_fini);

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Print a nice header for the order */
static void ttm_pool_debugfs_header(struct seq_file *m)
{
	unsigned int i;

	seq_puts(m, "\t ");
	for (i = 0; i <= MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i <= MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/* Dump the total amount of allocated pages */
static void ttm_pool_debugfs_footer(struct seq_file *m)
{
	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);
}

/* Dump the information for the global pools */
static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);
	seq_puts(m, "wc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_write_combined, m);
	seq_puts(m, "uc 32\t:");
	ttm_pool_debugfs_orders(global_dma32_uncached, m);
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	if (!pool->use_dma_alloc) {
		seq_puts(m, "unused\n");
		return 0;
	}

	ttm_pool_debugfs_header(m);

	spin_lock(&shrinker_lock);
	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}
	spin_unlock(&shrinker_lock);

	ttm_pool_debugfs_footer(m);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
	struct shrink_control sc = { .gfp_mask = GFP_NOFS };

	fs_reclaim_acquire(GFP_KERNEL);
	seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
		   ttm_pool_shrinker_scan(&mm_shrinker, &sc));
	fs_reclaim_release(GFP_KERNEL);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);

#endif

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i <= MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

		ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_dma32_uncached[i], NULL,
				   ttm_uncached, i);
	}

#ifdef CONFIG_DEBUG_FS
	debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_globals_fops);
	debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
			    &ttm_pool_debugfs_shrink_fops);
#endif

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker, "drm-ttm_pool");
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i <= MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);

		ttm_pool_type_fini(&global_dma32_write_combined[i]);
		ttm_pool_type_fini(&global_dma32_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}