/*
 * Copyright 2011 (c) Oracle Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */
/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks currently in use pages
 * - Tracks whether the page is UC, WB or cached (and reverts to WB
 *   when freed).
 */
#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
#include <asm/agp.h> /* for the map_page_into_agp() fallbacks below */
#endif
#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	4
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define IS_UNDEFINED	(0)
#define IS_WC		(1<<1)
#define IS_UC		(1<<2)
#define IS_CACHED	(1<<3)
#define IS_DMA32	(1<<4)
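/*
 * Illustrative arithmetic (assuming a 4 KiB PAGE_SIZE and 8-byte pointers,
 * i.e. a typical 64-bit build): NUM_PAGES_TO_ALLOC = 4096 / 8 = 512, so
 * allocations, caching-state changes and frees are batched in chunks of up
 * to 512 pages - exactly one page worth of 'struct page *' pointers for
 * the scratch array.
 */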
enum pool_type {
	POOL_IS_UNDEFINED,
	POOL_IS_WC = IS_WC,
	POOL_IS_UC = IS_UC,
	POOL_IS_CACHED = IS_CACHED,
	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
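/*
 * For example (illustrative only): an uncached DMA32 pool has type
 * POOL_IS_UC_DMA32 == IS_UC | IS_DMA32 == (1<<2) | (1<<4), so both
 * 'pool->type & IS_UC' and 'pool->type & IS_DMA32' test true while
 * 'pool->type & IS_CACHED' does not.
 */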
/*
 * The pool structure. There are usually six pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool
 * @lock: Protects the inuse_list and free_list from concurrent access. Must be
 *   used with irqsave/irqrestore variants because the pool allocator may be
 *   called from delayed work.
 * @inuse_list: Pool of pages that are in use. The order is very important and
 *   it is in the order that the TTM pages that are put back are in.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head inuse_list;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};
/*
 * The accounting page keeping track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	void *vaddr;
	struct page *p;
	dma_addr_t dma;
};
/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have immediate effect
 * anyway so forcing serialization to access them is pointless.
 */
struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};
/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};
/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total number of pools in existence.
 * @mm_shrink: The structure used by [un]register_shrinker
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};
static struct ttm_pool_manager *_manager;
static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};
static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);

	kfree(m);
}
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
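/*
 * Worked example (illustrative, assuming a 4 KiB PAGE_SIZE; the exact sysfs
 * path depends on where the TTM memory global kobject is parented):
 *
 *	echo 16384 > /sys/.../dma_pool/pool_max_size
 *
 * stores val = 16384 / (PAGE_SIZE >> 10) = 16384 / 4 = 4096 pages as the
 * new per-pool maximum, since the sysfs files are expressed in kilobytes.
 */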
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	/* Convert from number of pages to kb */
	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};
#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* for !CONFIG_X86 */
static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;

	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
	kfree(d_page);
}
static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
					   &d_page->dma,
					   pool->gfp_flags);
	if (d_page->vaddr) {
		d_page->p = virt_to_page(d_page->vaddr);
	} else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}
static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}
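/*
 * Quick sanity check of the mapping above (illustrative only):
 * ttm_to_type(TTM_PAGE_FLAG_DMA32, tt_uncached) yields
 * IS_DMA32 | IS_UC == POOL_IS_UC_DMA32, ttm_to_type(0, tt_cached) yields
 * IS_CACHED == POOL_IS_CACHED, and any other caching state falls through
 * to the write-combined pool.
 */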
static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}
/* set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, 1);

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}
/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees all pages in
 *   the pool
 * @use_static: Safe to use the static buffer
 */
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
				       bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
		 pool->dev_name, pool->name, current->pid,
		 npages_to_free, nr_free);

	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
					GFP_KERNEL);

	if (!pages_to_free) {
		pr_err("%s: Failed to allocate memory for pool free operation\n",
		       pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We're picking the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/*
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;
		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}
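/*
 * For reference, the callers in this file: ttm_dma_free_pool() drains a
 * pool completely via ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true),
 * the shrinker passes sc->nr_to_scan with use_static == true (the global
 * mutex is held in both cases), and ttm_dma_unpopulate() trims the excess
 * with use_static == false as no lock guards the static buffer there.
 */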
static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		/* OK to use static buffer since global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}
/*
 * On freeing of the 'struct device' this destructor is run.
 * Albeit the pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}
static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->inuse_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < 5; i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_ calls b/c when dma_pool_destroy is called
	 * - the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}
static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp, *found = NULL;

	if (type == IS_UNDEFINED)
		return found;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in the drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
	 * and call the dev_res destructors: ttm_dma_pool_release. The nice
	 * thing is at that point of time there are no pages associated with the
	 * driver so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		found = pool;
		break;
	}
	return found;
}
/*
 * Free the pages that failed to change their caching state. If there
 * are pages that have changed their caching state already, put them
 * back to the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}
/*
 * Allocate 'count' pages, and put 'need' number of them on the
 * 'pages' and as well on the 'dma_address' starting at 'dma_offset' offset.
 * The full list of pages should also be on 'd_pages'.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("%s: Unable to allocate table for new pages\n",
		       pool->dev_name);
		return -ENOMEM;
	}

	pr_debug("%s: (%s:%d) Getting %d pages\n",
		 pool->dev_name, pool->name, current->pid, count);

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_err("%s: Unable to get page %u\n",
			       pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
		list_add(&dma_p->page_list, d_pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}
/*
 * @return count of pages still required to fulfill the request.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh to the end.. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
			       pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list)
				cpages++;
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
		}
	}
	return r;
}
/*
 * @return count of pages still required to fulfill the request.
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 */
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
				  struct ttm_dma_tt *ttm_dma,
				  unsigned index)
{
	struct dma_page *d_page;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count, r = -ENOMEM;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->cpu_address[index] = d_page->vaddr;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		r = 0;
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return r;
}
/*
 * On success the pages list will hold 'count' correctly cached pages. On
 * failure the return value is negative (-ENOMEM, etc).
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct dma_pool *pool;
	enum pool_type type;
	unsigned i;
	gfp_t gfp_flags;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool)) {
			return -ENOMEM;
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
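/*
 * Illustrative driver-side usage (a minimal sketch; 'my_device' and
 * 'my_ttm_populate' are hypothetical names, not part of this file).
 * Drivers typically forward their ttm_tt populate callback here when a
 * coherent DMA pool is wanted:
 *
 *	static int my_ttm_populate(struct ttm_tt *ttm)
 *	{
 *		struct ttm_dma_tt *ttm_dma =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *
 *		return ttm_dma_populate(ttm_dma, my_device);
 *	}
 */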
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure pages array match list and count number of pages */
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
		ttm->pages[count] = d_page->p;
		count++;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		if (pool->npages_free > _manager->options.max_size) {
			npages = pool->npages_free - _manager->options.max_size;
			/* free at least NUM_PAGES_TO_ALLOC number of pages
			 * to reduce calls to set_memory_wb */
			if (npages < NUM_PAGES_TO_ALLOC)
				npages = NUM_PAGES_TO_ALLOC;
		}
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (is_cached) {
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p);
			ttm_dma_page_put(pool, d_page);
		}
	} else {
		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->cpu_address[i] = 0;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
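/*
 * Illustrative trim arithmetic (example numbers only): with
 * options.max_size = 4096 and npages_free = 4200 after the splice above,
 * npages = 4200 - 4096 = 104, which is then rounded up to
 * NUM_PAGES_TO_ALLOC (512 on a typical 64-bit build) so that
 * set_pages_array_wb() is not invoked for tiny batches.
 */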
/*
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
 * shrinkers.
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}
static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret = -ENOMEM;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		goto err;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		goto err;
	}
	ttm_dma_pool_mm_shrink_init(_manager);
	return 0;
err:
	return ret;
}
void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;
	/* Note: only the first six headers are printed in the summary view;
	 * "virt" and "busaddr" are currently unused. */
	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
		     "name", "virt", "busaddr"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
		   h[0], h[1], h[2], h[3], h[4], h[5]);
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;

		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
			   pool->name, pool->nrefills,
			   pool->nfrees, pool->npages_in_use,
			   pool->npages_free,
			   pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
#endif /* CONFIG_SWIOTLB || CONFIG_INTEL_IOMMU */