/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>

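/*
 * Number of shrink-and-retry passes ttm_mem_global_alloc_zone() makes
 * before failing an allocation with -ENOMEM.
 */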
#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_global ttm_mem_glob;
EXPORT_SYMBOL(ttm_mem_glob);

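/*
 * Accounting is done per zone.  Each zone tracks how much memory TTM has
 * accounted against it (used_mem) and compares that against three
 * thresholds: max_mem (the ordinary allocation limit), emer_mem (the
 * limit for CAP_SYS_ADMIN callers) and swap_limit (the level above which
 * buffer objects start being swapped out).
 */
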
struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	/* the written value is given in KiB; convert to bytes */
	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static struct attribute ttm_mem_global_lower_mem_limit = {
	.name = "lower_mem_limit",
	.mode = S_IRUGO | S_IWUSR
};

static ssize_t ttm_mem_global_show(struct kobject *kobj,
				   struct attribute *attr,
				   char *buffer)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);
	uint64_t val = 0;

	spin_lock(&glob->lock);
	val = glob->lower_mem_limit;
	spin_unlock(&glob->lock);
	/* convert from number of pages to KiB */
	val <<= (PAGE_SHIFT - 10);
	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val);
}

static ssize_t ttm_mem_global_store(struct kobject *kobj,
				    struct attribute *attr,
				    const char *buffer,
				    size_t size)
{
	int chars;
	uint64_t val64;
	unsigned long val;
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	/* convert from KiB to number of pages */
	val64 >>= (PAGE_SHIFT - 10);

	spin_lock(&glob->lock);
	glob->lower_mem_limit = val64;
	spin_unlock(&glob->lock);

	return size;
}

static struct attribute *ttm_mem_global_attrs[] = {
	&ttm_mem_global_lower_mem_limit,
	NULL
};

static const struct sysfs_ops ttm_mem_global_ops = {
	.show = &ttm_mem_global_show,
	.store = &ttm_mem_global_store,
};

static struct kobj_type ttm_mem_glob_kobj_type = {
	.sysfs_ops = &ttm_mem_global_ops,
	.default_attrs = ttm_mem_global_attrs,
};

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/*
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra, struct ttm_operation_ctx *ctx)
{
	int ret;

	spin_lock(&glob->lock);

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		spin_unlock(&glob->lock);
		ret = ttm_bo_swapout(glob->bo_glob, ctx);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			break;
	}

	spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_mem_global *glob =
		container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL, &ctx);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

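/*
 * The shifts above (and in the zone-init helpers below) set the default
 * thresholds as fractions of the zone size: max_mem = 1/2, emer_mem =
 * 1/2 + 1/4 = 3/4, and swap_limit = 1/2 - 1/8 = 3/8 of zone_mem.
 */
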
#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
	int ret;

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/*
	 * No special dma32 zone needed.
	 */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

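/*
 * Net effect of the helpers above: every configuration gets the "kernel"
 * zone; CONFIG_HIGHMEM kernels additionally get a "highmem" zone, while
 * non-highmem kernels with more than 4 GiB of RAM get a "dma32" zone.
 */
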
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}

	si_meminfo(&si);

	/* default to 0 to keep the original OOM behavior */
	glob->lower_mem_limit = 0;

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}

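/*
 * Minimal usage sketch (a hedged illustration, not a fixed API contract):
 * a driver calls ttm_mem_global_init(&ttm_mem_glob) once at load time and
 * ttm_mem_global_release(&ttm_mem_glob) at teardown, with every
 * ttm_mem_global_alloc() balanced by a ttm_mem_global_free().
 */
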
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}

	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

/*
 * Check whether available memory is under the lower memory limit:
 *
 * a. if there is no swap disk at all, or free swap space is under
 * swap_mem_limit but available system memory is bigger than
 * sys_mem_limit, allow TTM allocation;
 *
 * b. if the available system memory is less than sys_mem_limit but free
 * swap disk is bigger than swap_mem_limit, allow TTM allocation.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
			   uint64_t num_pages,
			   struct ttm_operation_ctx *ctx)
{
	int64_t available;

	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
		return false;

	available = get_nr_swap_pages() + si_mem_available();
	available -= num_pages;
	if (available < glob->lower_mem_limit)
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_check_under_lowerlimit);

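/*
 * Worked example: with lower_mem_limit set to 262144 pages (1 GiB with
 * 4 KiB pages), a request for num_pages = 1024 (4 MiB) returns true
 * (i.e. the allocation should be refused) whenever free swap plus
 * available system memory, minus the request, would fall below 1 GiB.
 */
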
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     struct ttm_operation_ctx *ctx)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (ctx->no_wait_gpu)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		/* shrink with some slack: the request plus 25% plus 16 bytes */
		ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 struct ttm_operation_ctx *ctx)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */
	return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);

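/*
 * A hedged usage sketch for accounting a driver-private object of
 * acc_size bytes (obj and acc_size are illustrative, not from this file):
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = false,
 *					 .no_wait_gpu = false };
 *	int ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
 *	if (ret == 0) {
 *		obj = kzalloc(acc_size, GFP_KERNEL);
 *		if (!obj)
 *			ttm_mem_global_free(&ttm_mem_glob, acc_size);
 *	}
 */
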
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page, uint64_t size,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	/* pfn 0x00100000 is the 4 GiB boundary with 4 KiB pages */
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
			      uint64_t size)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, size);
}

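/*
 * Round an allocation size for accounting purposes: powers of two are
 * returned unchanged, sizes above PAGE_SIZE are page-aligned, and anything
 * else is rounded up to the next power of two (minimum 4).  For example,
 * ttm_round_pot(100) == 128 and, with 4 KiB pages,
 * ttm_round_pot(PAGE_SIZE + 1) == 2 * PAGE_SIZE.
 */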
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
}
EXPORT_SYMBOL(ttm_round_pot);

uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
	return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);