/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_zone {
        struct kobject kobj;
        struct ttm_mem_global *glob;
        const char *name;
        uint64_t zone_mem;
        uint64_t emer_mem;
        uint64_t max_mem;
        uint64_t swap_limit;
        uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
        .name = "zone_memory",
        .mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
        .name = "emergency_memory",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
        .name = "available_memory",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
        .name = "used_memory_swap_limit",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
        .name = "used_memory",
        .mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);

        pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
                zone->name, (unsigned long long)zone->used_mem >> 10);
        kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
                                 struct attribute *attr,
                                 char *buffer)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);
        uint64_t val = 0;

        spin_lock(&zone->glob->lock);
        if (attr == &ttm_mem_sys)
                val = zone->zone_mem;
        else if (attr == &ttm_mem_emer)
                val = zone->emer_mem;
        else if (attr == &ttm_mem_max)
                val = zone->max_mem;
        else if (attr == &ttm_mem_swap)
                val = zone->swap_limit;
        else if (attr == &ttm_mem_used)
                val = zone->used_mem;
        spin_unlock(&zone->glob->lock);

        return snprintf(buffer, PAGE_SIZE, "%llu\n",
                        (unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
                                  struct attribute *attr,
                                  const char *buffer,
                                  size_t size)
{
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);
        int chars;
        unsigned long val;
        uint64_t val64;

        chars = sscanf(buffer, "%lu", &val);
        if (chars == 0)
                return size;

        val64 = val;
        val64 <<= 10;

        spin_lock(&zone->glob->lock);
        if (val64 > zone->zone_mem)
                val64 = zone->zone_mem;
        if (attr == &ttm_mem_emer) {
                zone->emer_mem = val64;
                if (zone->max_mem > val64)
                        zone->max_mem = val64;
        } else if (attr == &ttm_mem_max) {
                zone->max_mem = val64;
                if (zone->emer_mem < val64)
                        zone->emer_mem = val64;
        } else if (attr == &ttm_mem_swap)
                zone->swap_limit = val64;
        spin_unlock(&zone->glob->lock);

        ttm_check_swapping(zone->glob);

        return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
        &ttm_mem_sys,
        &ttm_mem_emer,
        &ttm_mem_max,
        &ttm_mem_swap,
        &ttm_mem_used,
        NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
        .show = &ttm_mem_zone_show,
        .store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
        .release = &ttm_mem_zone_kobj_release,
        .sysfs_ops = &ttm_mem_zone_ops,
        .default_attrs = ttm_mem_zone_attrs,
};
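
/*
 * The attributes above are exposed as per-zone sysfs files beneath the
 * "memory_accounting" kobject. All values are reported and accepted in
 * kiB: ttm_mem_zone_show() shifts right by 10 before printing, and
 * ttm_mem_zone_store() shifts the parsed value left by 10 before
 * clamping it to zone->zone_mem.
 */
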
static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
        struct ttm_mem_global *glob =
                container_of(kobj, struct ttm_mem_global, kobj);

        kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
        .release = &ttm_mem_global_kobj_release,
};

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
                                        bool from_wq, uint64_t extra)
{
        unsigned int i;
        struct ttm_mem_zone *zone;
        uint64_t target;

        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];

                if (from_wq)
                        target = zone->swap_limit;
                else if (capable(CAP_SYS_ADMIN))
                        target = zone->emer_mem;
                else
                        target = zone->max_mem;

                target = (extra > target) ? 0ULL : target;

                if (zone->used_mem > target)
                        return true;
        }

        return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
                       uint64_t extra)
{
        int ret;
        struct ttm_mem_shrink *shrink;

        spin_lock(&glob->lock);
        if (glob->shrink == NULL)
                goto out;

        while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
                shrink = glob->shrink;
                spin_unlock(&glob->lock);
                ret = shrink->do_shrink(shrink);
                spin_lock(&glob->lock);
                if (unlikely(ret != 0))
                        goto out;
        }
out:
        spin_unlock(&glob->lock);
}
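
/*
 * Illustrative sketch, not part of the original file: a TTM user
 * installs the single shrink callback through the helpers declared in
 * ttm_memory.h. The names my_shrink and my_swap_one_buffer below are
 * hypothetical:
 *
 *	static int my_do_shrink(struct ttm_mem_shrink *shrink)
 *	{
 *		return my_swap_one_buffer();
 *	}
 *
 *	ttm_mem_init_shrink(&my_shrink, my_do_shrink);
 *	ret = ttm_mem_register_shrink(glob, &my_shrink);
 *
 * where my_swap_one_buffer() returns 0 on progress and an error code
 * otherwise. ttm_shrink() drops glob->lock around each do_shrink()
 * call, which is why the zones are re-checked on every loop iteration
 * and why the function tolerates being entered by many threads at once.
 */
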
static void ttm_shrink_work(struct work_struct *work)
{
        struct ttm_mem_global *glob =
            container_of(work, struct ttm_mem_global, work);

        ttm_shrink(glob, true, 0ULL);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
                                    const struct sysinfo *si)
{
        struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        uint64_t mem;
        int ret;

        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram - si->totalhigh;
        mem *= si->mem_unit;

        zone->name = "kernel";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_kernel = zone;
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}
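
/*
 * Worked example of the default limits above: with an 8 GiB kernel
 * zone, max_mem = mem >> 1 = 4 GiB, emer_mem = 3/4 of mem = 6 GiB
 * (reachable only by CAP_SYS_ADMIN callers, see
 * ttm_mem_global_reserve()), and swap_limit = max_mem - mem/8 = 3 GiB,
 * so background swapping starts well before allocations begin to fail.
 */
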
#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
                                     const struct sysinfo *si)
{
        struct ttm_mem_zone *zone;
        uint64_t mem;
        int ret;

        if (si->totalhigh == 0)
                return 0;

        zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram;
        mem *= si->mem_unit;

        zone->name = "highmem";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_highmem = zone;
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
                zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
                                   const struct sysinfo *si)
{
        struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        uint64_t mem;
        int ret;

        if (unlikely(!zone))
                return -ENOMEM;

        mem = si->totalram;
        mem *= si->mem_unit;

        /**
         * No special dma32 zone needed.
         */

        if (mem <= ((uint64_t) 1ULL << 32)) {
                kfree(zone);
                return 0;
        }

        /*
         * Limit max dma32 memory to 4GB for now
         * until we can figure out how big this
         * zone really is.
         */

        mem = ((uint64_t) 1ULL << 32);
        zone->name = "dma32";
        zone->zone_mem = mem;
        zone->max_mem = mem >> 1;
        zone->emer_mem = (mem >> 1) + (mem >> 2);
        zone->swap_limit = zone->max_mem - (mem >> 3);
        zone->used_mem = 0;
        zone->glob = glob;
        glob->zone_dma32 = zone;
        ret = kobject_init_and_add(
                &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
        if (unlikely(ret != 0)) {
                kobject_put(&zone->kobj);
                return ret;
        }
        glob->zones[glob->num_zones++] = zone;
        return 0;
}
#endif
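
/*
 * Concrete instance of the cap above: on a 16 GiB machine the dma32
 * zone is created with zone_mem capped at 4 GiB, giving max_mem =
 * 2 GiB and swap_limit = 1.5 GiB under the same 1/2, 3/4 and 3/8
 * fractions used by the other zones.
 */
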
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
        struct sysinfo si;
        int ret;
        int i;
        struct ttm_mem_zone *zone;

        spin_lock_init(&glob->lock);
        glob->swap_queue = create_singlethread_workqueue("ttm_swap");
        INIT_WORK(&glob->work, ttm_shrink_work);
        ret = kobject_init_and_add(
                &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(),
                "memory_accounting");
        if (unlikely(ret != 0)) {
                kobject_put(&glob->kobj);
                return ret;
        }

        si_meminfo(&si);

        ret = ttm_mem_init_kernel_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#ifdef CONFIG_HIGHMEM
        ret = ttm_mem_init_highmem_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#else
        ret = ttm_mem_init_dma32_zone(glob, &si);
        if (unlikely(ret != 0))
                goto out_no_zone;
#endif
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
                        zone->name, (unsigned long long)zone->max_mem >> 10);
        }
        ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
        ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
        return 0;
out_no_zone:
        ttm_mem_global_release(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);
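
/*
 * Sizing note: both page pool allocators above are initialised with a
 * limit of zone_kernel->max_mem / (2 * PAGE_SIZE) pages, i.e. pooled
 * pages may hold at most half of the kernel zone's allowed memory.
 */
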
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
        unsigned int i;
        struct ttm_mem_zone *zone;

        /* let the page allocator first stop the shrink work. */
        ttm_page_alloc_fini();
        ttm_dma_page_alloc_fini();

        flush_workqueue(glob->swap_queue);
        destroy_workqueue(glob->swap_queue);
        glob->swap_queue = NULL;
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                kobject_del(&zone->kobj);
                kobject_put(&zone->kobj);
        }
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
        bool needs_swapping = false;
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (zone->used_mem > zone->swap_limit) {
                        needs_swapping = true;
                        break;
                }
        }
        spin_unlock(&glob->lock);

        if (unlikely(needs_swapping))
                (void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
                                     struct ttm_mem_zone *single_zone,
                                     uint64_t amount)
{
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (single_zone && zone != single_zone)
                        continue;
                zone->used_mem -= amount;
        }
        spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
                         uint64_t amount)
{
        return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
                                  struct ttm_mem_zone *single_zone,
                                  uint64_t amount, bool reserve)
{
        uint64_t limit;
        int ret = -ENOMEM;
        unsigned int i;
        struct ttm_mem_zone *zone;

        spin_lock(&glob->lock);
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
                if (single_zone && zone != single_zone)
                        continue;

                limit = (capable(CAP_SYS_ADMIN)) ?
                        zone->emer_mem : zone->max_mem;

                if (zone->used_mem > limit)
                        goto out_unlock;
        }

        if (reserve) {
                for (i = 0; i < glob->num_zones; ++i) {
                        zone = glob->zones[i];
                        if (single_zone && zone != single_zone)
                                continue;
                        zone->used_mem += amount;
                }
        }

        ret = 0;
out_unlock:
        spin_unlock(&glob->lock);
        ttm_check_swapping(glob);

        return ret;
}
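
/*
 * ttm_mem_global_reserve() is a check-then-commit under a single
 * glob->lock hold: the first pass rejects the request if any relevant
 * zone is already over its limit, and only then does the second pass
 * charge the amount to every relevant zone. Note that the limit check
 * uses the zone's current used_mem, not used_mem + amount, so one
 * request may nudge a zone past its limit; the next request then fails.
 */
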
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
                                     struct ttm_mem_zone *single_zone,
                                     uint64_t memory,
                                     bool no_wait, bool interruptible)
{
        int count = TTM_MEMORY_ALLOC_RETRIES;

        while (unlikely(ttm_mem_global_reserve(glob,
                                               single_zone,
                                               memory, true)
                        != 0)) {
                if (no_wait)
                        return -ENOMEM;
                if (unlikely(count-- == 0))
                        return -ENOMEM;
                ttm_shrink(glob, false, memory + (memory >> 2) + 16);
        }

        return 0;
}
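
/*
 * The retry loop above asks ttm_shrink() to free the requested amount
 * plus 25% and 16 bytes of slack (memory + (memory >> 2) + 16), and it
 * gives up after TTM_MEMORY_ALLOC_RETRIES (4) failed reservations.
 */
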
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
                         bool no_wait, bool interruptible)
{
        /**
         * Normal allocations of kernel memory are registered in
         * all zones.
         */

        return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
                                         interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
                              struct page *page,
                              bool no_wait, bool interruptible)
{
        struct ttm_mem_zone *zone = NULL;

        /**
         * Page allocations may be registered in a single zone
         * only if highmem or !dma32.
         */

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page) && glob->zone_highmem != NULL)
                zone = glob->zone_highmem;
#else
        if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
                zone = glob->zone_kernel;
#endif
        return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
                                         interruptible);
}
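
/*
 * The pfn threshold above corresponds to 4 GiB with 4 KiB pages
 * (0x00100000 pages * 4096 bytes = 2^32): a page living above 4 GiB
 * cannot come from the dma32 range, so it is accounted against the
 * kernel zone alone and leaves the dma32 zone's budget untouched.
 */
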
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
        struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
        if (PageHighMem(page) && glob->zone_highmem != NULL)
                zone = glob->zone_highmem;
#else
        if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
                zone = glob->zone_kernel;
#endif
        ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}

size_t ttm_round_pot(size_t size)
{
        if ((size & (size - 1)) == 0)
                return size;
        else if (size > PAGE_SIZE)
                return PAGE_ALIGN(size);
        else {
                size_t tmp_size = 4;

                while (tmp_size < size)
                        tmp_size <<= 1;

                return tmp_size;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_round_pot);
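
/*
 * Worked examples, assuming PAGE_SIZE == 4096: ttm_round_pot(96)
 * rounds up through the loop to 128; ttm_round_pot(4096) is already a
 * power of two and is returned unchanged; ttm_round_pot(5000) exceeds
 * PAGE_SIZE and is page-aligned to 8192. The trailing "return 0" is
 * unreachable.
 */
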
uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
        return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);