/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000
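/*
 * Note: with the common 4 KiB PAGE_SIZE and 8-byte page pointers,
 * NUM_PAGES_TO_ALLOC works out to 512, so the pools are refilled and
 * drained in batches of up to 512 pages (2 MiB) at a time.
 */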
/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
	unsigned int		order;
};
/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway so forcing serialization to access them is pointless.
 */
struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 6
/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read only object for the pool code so it doesn't need
 * locking.
 *
 * @kobj: kobject used to expose the pool limits in sysfs.
 * @mm_shrink: Shrinker used to reduce the pools when the system is under
 * memory pressure.
 * @options: Limits for the pools, see struct ttm_pool_opts.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
			struct ttm_page_pool	wc_pool_huge;
			struct ttm_page_pool	uc_pool_huge;
		};
	};
};
static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};
static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
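/*
 * Note: the sysfs files take sizes in KiB while the options are kept in
 * pages. With a 4 KiB PAGE_SIZE, writing e.g. 8192 to pool_max_size
 * therefore sets options.max_size to 2048 pages.
 */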
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	/* Convert from number of pages to KiB */
	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};
static struct ttm_pool_manager *_manager;
#ifndef CONFIG_X86
static int set_pages_wb(struct page *page, int numpages)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < numpages; i++)
		unmap_page_from_agp(page++);
#endif
	return 0;
}

static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* !CONFIG_X86 */
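/*
 * Note: on x86 the set_pages_*() helpers come from <asm/set_memory.h> and
 * really change the page attributes; the fallbacks above only (un)map the
 * pages from the AGP aperture when AGP is enabled and are no-ops otherwise.
 */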
/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
					  enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32) {
		if (huge)
			return NULL;
		pool_index |= 0x2;

	} else if (huge) {
		pool_index |= 0x4;
	}

	return &_manager->pools[pool_index];
}
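/*
 * The index computed above follows the union layout in struct
 * ttm_pool_manager: 0 = wc_pool, 1 = uc_pool, 2 = wc_pool_dma32,
 * 3 = uc_pool_dma32, 4 = wc_pool_huge, 5 = uc_pool_huge. Cached pages and
 * huge DMA32 requests have no pool and are allocated/freed directly.
 */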
/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages,
			  unsigned int order)
{
	unsigned int i, pages_nr = (1 << order);

	if (order == 0) {
		if (set_pages_array_wb(pages, npages))
			pr_err("Failed to set %d pages to wb!\n", npages);
	}

	for (i = 0; i < npages; ++i) {
		if (order > 0) {
			if (set_pages_wb(pages[i], pages_nr))
				pr_err("Failed to set %d pages to wb!\n", pages_nr);
		}
		__free_pages(pages[i], order);
	}
}
static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
					unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}
/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, free all pages in the pool
 * @use_static: Safe to use static buffer
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
			      bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
					GFP_KERNEL);
	if (!pages_to_free) {
		pr_debug("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages, pool->order);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;
		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages, pool->order);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}
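/*
 * Note: because the pool lock is dropped for every NUM_PAGES_TO_ALLOC batch,
 * a request to free e.g. 1500 pages with a 512 page batch size is handled as
 * three passes of 512, 512 and 476 pages, with other CPUs free to use the
 * pool in between.
 */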
/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	static unsigned start_pool;
	unsigned i;
	unsigned pool_offset;
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int nr_free_pool;

	if (!mutex_trylock(&lock))
		return SHRINK_STOP;
	pool_offset = ++start_pool % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		unsigned page_nr;

		if (shrink_pages == 0)
			break;

		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		page_nr = (1 << pool->order);
		/* OK to use static buffer since global mutex is held. */
		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
		freed += (nr_free_pool - shrink_pages) << pool->order;
		if (freed >= sc->nr_to_scan)
			break;
		shrink_pages <<= pool->order;
	}
	mutex_unlock(&lock);
	return freed;
}
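/*
 * Note: start_pool above is static, so successive shrinker invocations begin
 * at a different pool each time; this spreads the reclaim pressure roughly
 * evenly over all NUM_POOLS pools instead of always draining the same pool
 * first.
 */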
static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;
	struct ttm_page_pool *pool;

	for (i = 0; i < NUM_POOLS; ++i) {
		pool = &_manager->pools[i];
		count += (pool->npages << pool->order);
	}

	return count;
}
static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}
static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}
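/*
 * Note: cached (write-back) pages normally never reach this function because
 * ttm_get_pool() gives them no pool; if they did, the default case above
 * leaves their caching attributes untouched.
 */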
/**
 * Free the pages that failed to change their caching state. If any pages
 * have already changed their caching state, put them back to the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;

	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}
/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
			       int ttm_flags, enum ttm_caching_state cstate,
			       unsigned count, unsigned order)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, j, cpages;
	unsigned npages = 1 << order;
	unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_debug("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_pages(gfp_flags, order);

		if (!p) {
			pr_debug("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
							ttm_flags, cstate,
							caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

		list_add(&p->lru, pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (PageHighMem(p))
			continue;
#endif
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p++;
			if (cpages == max_cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
							ttm_flags, cstate,
							caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}
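/*
 * Note: caching changes above are batched through caching_array in chunks of
 * at most NUM_PAGES_TO_ALLOC pages, so with the usual 4 KiB pages a single
 * order-9 huge allocation flips all 512 of its subpages with one
 * set_pages_array_*() call instead of one call per page.
 */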
/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
				      enum ttm_caching_state cstate,
				      unsigned count, unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;

	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
					cstate, alloc_size, 0);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_debug("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}
	}
	pool->fill_lock = false;
}
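/*
 * Note: fill_lock is a plain bool that is tested and set while pool->lock is
 * held, not a real lock; a concurrent allocation that finds it set simply
 * skips the refill and gets its pages from outside the pool instead.
 */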
/**
 * Allocate pages from the pool and put them on the return list.
 *
 * @return zero for success or negative error code.
 */
static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
				   struct list_head *pages,
				   int ttm_flags,
				   enum ttm_caching_state cstate,
				   unsigned count, unsigned order)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;
	int r = 0;

	spin_lock_irqsave(&pool->lock, irq_flags);
	if (!order)
		ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
					  &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* find the last pages to include for requested number of pages. Split
	 * pool to begin and halve it to reduce search space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	/* clear the pages coming from the pool if requested */
	if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		struct page *page;

		list_for_each_entry(page, pages, lru) {
			if (PageHighMem(page))
				clear_highpage(page);
			else
				clear_page(page_address(page));
		}
	}

	/* If pool didn't have enough pages allocate new one. */
	if (count) {
		gfp_t gfp_flags = pool->gfp_flags;

		/* set zero flag for page allocation if required */
		if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 */
		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
					count, order);
	}

	return r;
}
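/*
 * Note on the split heuristic above: the list is walked from the head when
 * fewer than half of the pooled pages are wanted and from the tail
 * otherwise, so finding the cut point never takes much more than
 * pool->npages/2 steps.
 */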
/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
#endif
	unsigned long irq_flags;
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		i = 0;
		while (i < npages) {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			struct page *p = pages[i];
#endif
			unsigned order = 0, j;

			if (!pages[i]) {
				++i;
				continue;
			}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			if (!(flags & TTM_PAGE_FLAG_DMA32)) {
				for (j = 0; j < HPAGE_PMD_NR; ++j)
					if (p++ != pages[i + j])
						break;

				if (j == HPAGE_PMD_NR)
					order = HPAGE_PMD_ORDER;
			}
#endif

			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			__free_pages(pages[i], order);

			j = 1 << order;
			while (j) {
				pages[i++] = NULL;
				--j;
			}
		}
		return;
	}

	i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (huge) {
		unsigned max_size, n2free;

		spin_lock_irqsave(&huge->lock, irq_flags);
		while ((npages - i) >= HPAGE_PMD_NR) {
			struct page *p = pages[i];
			unsigned j;

			if (!p)
				break;

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				if (p++ != pages[i + j])
					break;

			if (j != HPAGE_PMD_NR)
				break;

			list_add_tail(&pages[i]->lru, &huge->list);

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[i++] = NULL;
			huge->npages++;
		}

		/* Check that we don't go over the pool limit */
		max_size = _manager->options.max_size;
		max_size /= HPAGE_PMD_NR;
		if (huge->npages > max_size)
			n2free = huge->npages - max_size;
		else
			n2free = 0;
		spin_unlock_irqrestore(&huge->lock, irq_flags);
		if (n2free)
			ttm_page_pool_free(huge, n2free, false);
	}
#endif

	spin_lock_irqsave(&pool->lock, irq_flags);
	while (i < npages) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
		++i;
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages, false);
}
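/*
 * Note on the huge page handling above: a run of HPAGE_PMD_NR physically
 * consecutive pages is returned to the huge pool as a single entry; anything
 * that does not form a complete run falls through to the regular
 * one-page-per-entry pool.
 */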
/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
#endif
	struct list_head plist;
	struct page *p = NULL;
	unsigned count, first;
	int r;

	/* No pool for cached pages */
	if (pool == NULL) {
		gfp_t gfp_flags = GFP_USER;
		unsigned i;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		unsigned j;
#endif

		/* set zero flag for page allocation if required */
		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (!(gfp_flags & GFP_DMA32)) {
			while (npages >= HPAGE_PMD_NR) {
				gfp_t huge_flags = gfp_flags;

				huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
					__GFP_KSWAPD_RECLAIM;
				huge_flags &= ~__GFP_MOVABLE;
				huge_flags &= ~__GFP_COMP;
				p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
				if (!p)
					break;

				for (j = 0; j < HPAGE_PMD_NR; ++j)
					pages[i++] = p++;

				npages -= HPAGE_PMD_NR;
			}
		}
#endif

		first = i;
		while (npages) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_debug("Unable to allocate page\n");
				return -ENOMEM;
			}

			/* Swap the pages if we detect consecutive order */
			if (i > first && pages[i - 1] == p - 1)
				swap(p, pages[i - 1]);

			pages[i++] = p;
			--npages;
		}
		return 0;
	}

	count = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (huge && npages >= HPAGE_PMD_NR) {
		INIT_LIST_HEAD(&plist);
		ttm_page_pool_get_pages(huge, &plist, flags, cstate,
					npages / HPAGE_PMD_NR,
					HPAGE_PMD_ORDER);

		list_for_each_entry(p, &plist, lru) {
			unsigned j;

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[count++] = &p[j];
		}
	}
#endif

	INIT_LIST_HEAD(&plist);
	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
				    npages - count, 0);

	first = count;
	list_for_each_entry(p, &plist, lru) {
		struct page *tmp = p;

		/* Swap the pages if we detect consecutive order */
		if (count > first && pages[count - 1] == tmp - 1)
			swap(tmp, pages[count - 1]);
		pages[count++] = tmp;
	}

	if (r) {
		/* If there is any pages in the list put them back to
		 * the pool.
		 */
		pr_debug("Failed to allocate extra pages for large request\n");
		ttm_put_pages(pages, count, flags, cstate);
		return r;
	}

	return 0;
}
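/*
 * Note on the pooled path above: when a huge pool exists and at least
 * HPAGE_PMD_NR pages are requested, whole huge entries are taken first (each
 * expanding into HPAGE_PMD_NR slots of pages[]) and only the remainder is
 * served from the regular single-page pool.
 */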
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
				      char *name, unsigned int order)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
	pool->order = order;
}
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	unsigned order = HPAGE_PMD_ORDER;
#else
	unsigned order = 0;
#endif

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma", 0);

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma", 0);

	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
				   __GFP_KSWAPD_RECLAIM) &
				  ~(__GFP_MOVABLE | __GFP_COMP),
				  "wc huge", order);

	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
				   __GFP_KSWAPD_RECLAIM) &
				  ~(__GFP_MOVABLE | __GFP_COMP),
				  "uc huge", order);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}
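/*
 * Note: the huge pools use the gfp mask of a "light" transparent huge page
 * allocation but with __GFP_COMP stripped, so the result is not a compound
 * page and its HPAGE_PMD_NR subpages can be handed out and tracked
 * individually in the pages[] arrays.
 */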
void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	/* OK to use static buffer since global mutex is no longer used. */
	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}
int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
			    ttm->caching_state);
	if (unlikely(ret != 0)) {
		ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
			      ttm->caching_state);
		return ret;
	}

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						PAGE_SIZE, ctx);
		if (unlikely(ret != 0)) {
			ttm_put_pages(ttm->pages, ttm->num_pages,
				      ttm->page_flags, ttm->caching_state);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);
void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (!ttm->pages[i])
			continue;

		ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i],
					 PAGE_SIZE);
	}
	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
		      ttm->caching_state);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
			       struct ttm_operation_ctx *ctx)
{
	unsigned i, j;
	int r;

	r = ttm_pool_populate(&tt->ttm, ctx);
	if (r)
		return r;

	for (i = 0; i < tt->ttm.num_pages; ++i) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
						  0, num_pages * PAGE_SIZE,
						  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, tt->dma_address[i])) {
			while (i--) {
				dma_unmap_page(dev, tt->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				tt->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(&tt->ttm);
			return -EFAULT;
		}

		for (j = 1; j < num_pages; ++j) {
			tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
			++i;
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_populate_and_map_pages);
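/*
 * Note: physically contiguous runs of pages are mapped above with a single
 * dma_map_page() call covering num_pages * PAGE_SIZE, and the dma_address[]
 * entries for the tail pages are then derived by adding PAGE_SIZE
 * increments; this keeps the number of IOMMU mappings low for huge pages.
 */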
void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
{
	unsigned i, j;

	for (i = 0; i < tt->ttm.num_pages;) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
			++i;
			continue;
		}

		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

		i += num_pages;
	}
	ttm_pool_unpopulate(&tt->ttm);
}
EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%7s %12s %13s %8s\n",
		   h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%7s %12ld %13ld %8d\n",
			   p->name, p->nrefills,
			   p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
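/*
 * Example debugfs output (values are illustrative only):
 *
 *    pool      refills   pages freed     size
 *      wc          128         61440     2048
 *      uc            0             0        0
 *  wc dma            0             0        0
 *  uc dma            0             0        0
 * wc huge            4             0        8
 * uc huge            0             0        0
 */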