/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>

#define TTM_MEMORY_ALLOC_RETRIES 4
struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};
static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};

static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};
static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}
static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}
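/*
 * Illustrative sketch, not part of the driver: each zone above is
 * exposed as a sysfs directory under the global "memory_accounting"
 * kobject, and every value is reported in kiB (hence the ">> 10").
 * The exact path prefix depends on what ttm_get_kobj() returns on a
 * given kernel, so treat it as an assumption; a read might look like:
 *
 *	$ cat .../memory_accounting/kernel/used_memory
 *	1024
 */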
static void ttm_check_swapping(struct ttm_mem_global *glob);
static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	/* input is interpreted as kiB, accounting is done in bytes */
	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}
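/*
 * Illustrative sketch: writes are parsed as kiB, clamped to the zone
 * size, and the two limits are kept consistent (max_mem never exceeds
 * emer_mem). For example, capping a zone to 256 MiB (the path prefix
 * is an assumption, as above):
 *
 *	# echo 262144 > .../memory_accounting/kernel/available_memory
 */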
static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};
static struct attribute ttm_mem_global_lower_mem_limit = {
	.name = "lower_mem_limit",
	.mode = S_IRUGO | S_IWUSR
};
static ssize_t ttm_mem_global_show(struct kobject *kobj,
				   struct attribute *attr,
				   char *buffer)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);
	uint64_t val = 0;

	spin_lock(&glob->lock);
	val = glob->lower_mem_limit;
	spin_unlock(&glob->lock);
	/* convert from number of pages to kiB */
	val <<= (PAGE_SHIFT - 10);
	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val);
}
static ssize_t ttm_mem_global_store(struct kobject *kobj,
				    struct attribute *attr,
				    const char *buffer,
				    size_t size)
{
	int chars;
	uint64_t val64;
	unsigned long val;
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	/* convert from kiB to number of pages */
	val64 >>= (PAGE_SHIFT - 10);

	spin_lock(&glob->lock);
	glob->lower_mem_limit = val64;
	spin_unlock(&glob->lock);

	return size;
}
static struct attribute *ttm_mem_global_attrs[] = {
	&ttm_mem_global_lower_mem_limit,
	NULL
};

static const struct sysfs_ops ttm_mem_global_ops = {
	.show = &ttm_mem_global_show,
	.store = &ttm_mem_global_store,
};

static struct kobj_type ttm_mem_glob_kobj_type = {
	.sysfs_ops = &ttm_mem_global_ops,
	.default_attrs = ttm_mem_global_attrs,
};
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}

	return false;
}
/*
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra, struct ttm_operation_ctx *ctx)
{
	int ret;

	spin_lock(&glob->lock);

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		spin_unlock(&glob->lock);
		ret = ttm_bo_swapout(glob->bo_glob, ctx);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			break;
	}

	spin_unlock(&glob->lock);
}
static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_mem_global *glob =
		container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL, &ctx);
}
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
	int ret;

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/*
	 * No special dma32 zone needed.
	 */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}

	si_meminfo(&si);

	/* set it to 0 by default to keep the original OOM behavior */
	glob->lower_mem_limit = 0;

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);
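/*
 * Illustrative sketch of a hypothetical caller, not code from this
 * file: a driver embedding the accounting object pairs init with
 * release on teardown. In-tree drivers of this era typically reach
 * these through drm_global_item_ref()/drm_global_item_unref() rather
 * than calling them directly.
 *
 *	static struct ttm_mem_global mem_glob;
 *
 *	ret = ttm_mem_global_init(&mem_glob);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...
 *	ttm_mem_global_release(&mem_glob);
 */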
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}
void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);
/*
 * Check whether the available memory is below the lower memory limit.
 *
 * a. If there is no swap device at all, or the free swap space is below
 * swap_mem_limit but the available system memory is bigger than
 * sys_mem_limit, allow the TTM allocation.
 *
 * b. If the available system memory is less than sys_mem_limit but the
 * free swap space is bigger than swap_mem_limit, allow the TTM allocation.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
			   uint64_t num_pages,
			   struct ttm_operation_ctx *ctx)
{
	int64_t available;

	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
		return false;

	available = get_nr_swap_pages() + si_mem_available();
	available -= num_pages;
	if (available < glob->lower_mem_limit)
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_check_under_lowerlimit);
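/*
 * Worked example with hypothetical numbers: with 1000 free swap pages,
 * 500 available system pages and lower_mem_limit set to 1200 pages, a
 * request for 400 pages leaves 1000 + 500 - 400 = 1100 pages, which is
 * below the limit, so the function returns true and the caller should
 * back off instead of risking an OOM.
 */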
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     struct ttm_operation_ctx *ctx)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (ctx->no_wait_gpu)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
	}

	return 0;
}
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 struct ttm_operation_ctx *ctx)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */
	return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
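/*
 * Illustrative sketch of a hypothetical caller: account a driver-side
 * allocation against the zones, then release the same amount when the
 * object is destroyed:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *
 *	ret = ttm_mem_global_alloc(glob, size, &ctx);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...
 *	ttm_mem_global_free(glob, size);
 */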
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page, uint64_t size,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}
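/*
 * Note on the pfn test above, assuming 4 KiB pages: 0x00100000UL pages
 * is exactly the 4 GiB boundary, so a page above it cannot lie in the
 * dma32 range and is charged to the kernel zone only, while a page
 * below it is charged to every zone, dma32 included:
 *
 *	page_to_pfn(page) == 0x00200000UL  (8 GiB)   -> kernel zone only
 *	page_to_pfn(page) == 0x00010000UL  (256 MiB) -> all zones
 */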
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
			      uint64_t size)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, size);
}
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_round_pot);
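/*
 * Worked examples, assuming 4 KiB pages:
 *
 *	ttm_round_pot(4096) == 4096	(already a power of two)
 *	ttm_round_pot(100)  == 128	(sub-page: next power of two)
 *	ttm_round_pot(5000) == 8192	(> PAGE_SIZE: PAGE_ALIGN rounds up)
 */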
uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
	return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);