// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes of physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>
/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
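/*
 * Worked example of one pkmap_count[] slot (illustrative, single page):
 *   kmap()   on a fresh slot:     0 -> 2  (mapped, one user)
 *   kmap()   again on same page:  2 -> 3  (two users)
 *   kunmap() twice:               3 -> 1  (no users, TLB still stale)
 *   flush_all_zero_pkmaps():      1 -> 0  (slot usable again)
 */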
#ifdef CONFIG_HIGHMEM

/*
 * Architectures with an aliasing data cache may define the following
 * family of helper functions in their asm/highmem.h to control the cache
 * color of virtual addresses where physical memory pages are mapped by
 * kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine color of virtual address where the page should be mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color
/*
 * Get next index for mapping inside PKMAP region for page with given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}
/*
 * Determine if page index inside PKMAP region (pkmap_nr) of given color
 * has wrapped around PKMAP region end. When this happens an attempt to
 * flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}
/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}
/*
 * Get head of a wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif
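/*
 * Illustrative sketch of an override: an architecture with an aliasing
 * (virtually indexed) data cache could supply its own helpers from
 * asm/highmem.h along these lines. PKMAP_COLOR_MASK is a hypothetical
 * name here; the block is kept out of the build.
 */
#if 0
#define PKMAP_COLOR_MASK	((SHMLBA >> PAGE_SHIFT) - 1)

static inline unsigned int get_pkmap_color(struct page *page)
{
	/* Match the virtual color to the page's physical color */
	return page_to_pfn(page) & PKMAP_COLOR_MASK;
}
#define get_pkmap_color get_pkmap_color
#endif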
atomic_long_t _totalhigh_pages __read_mostly;
EXPORT_SYMBOL(_totalhigh_pages);
unsigned int __nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}
static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;
/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQ out of the locking in that case to save on a
 * potential useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()		spin_lock_irq(&kmap_lock)
#define unlock_kmap()		spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)	spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)	spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()		spin_lock(&kmap_lock)
#define unlock_kmap()		spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)	\
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)	\
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif
struct page *__kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);

		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(__kmap_to_page);
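/*
 * Illustrative sketch: kmap_to_page() (the wrapper around the function
 * above) inverts a mapped address back to its struct page, for both
 * PKMAP addresses and linear-map addresses. Hypothetical helper, kept
 * out of the build.
 */
#if 0
static void example_kmap_roundtrip(struct page *page)
{
	void *vaddr = kmap(page);

	WARN_ON(kmap_to_page(vaddr) != page);	/* round trip holds */
	kunmap(page);
}
#endif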
static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

void __kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}
static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}
/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_high);
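/*
 * Illustrative sketch of the usual calling pattern. The kmap()/kunmap()
 * wrappers reach kmap_high()/kunmap_high() for highmem pages; they may
 * sleep, so process context only. Hypothetical helper, kept out of the
 * build.
 */
#if 0
static void example_clear_highmem_page(struct page *page)
{
	void *vaddr = kmap(page);	/* pins a PKMAP slot, may sleep */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* drops the pkmap_count reference */
}
#endif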
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *) vaddr;
}
#endif
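/*
 * Illustrative sketch of the kmap_high_get() contract: only a non-NULL
 * return pins the page and must be balanced by kunmap_high().
 * Hypothetical helper, kept out of the build.
 */
#if 0
static bool example_peek_if_mapped(struct page *page, void *buf, size_t len)
{
	void *vaddr = kmap_high_get(page);	/* any context, never sleeps */

	if (!vaddr)
		return false;	/* page had no permanent kmap */
	memcpy(buf, vaddr, len);
	kunmap_high(page);	/* balance the pin taken above */
	return true;
}
#endif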
/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock. As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock. Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}
EXPORT_SYMBOL(kunmap_high);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	for (i = 0; i < compound_nr(page); i++) {
		void *kaddr = NULL;

		if (start1 < PAGE_SIZE || start2 < PAGE_SIZE)
			kaddr = kmap_atomic(page + i);

		if (start1 >= PAGE_SIZE) {
			start1 -= PAGE_SIZE;
			end1 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);

			if (end1 > start1)
				memset(kaddr + start1, 0, this_end - start1);
			end1 -= this_end;
			start1 = 0;
		}

		if (start2 >= PAGE_SIZE) {
			start2 -= PAGE_SIZE;
			end2 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);

			if (end2 > start2)
				memset(kaddr + start2, 0, this_end - start2);
			end2 -= this_end;
			start2 = 0;
		}

		if (kaddr) {
			kunmap_atomic(kaddr);
			flush_dcache_page(page + i);
		}

		if (!end1 && !end2)
			break;
	}

	BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
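/*
 * Illustrative sketch: zero two independent byte ranges of a (possibly
 * compound) page in one call. The 64-byte ranges are arbitrary example
 * values; the block is kept out of the build.
 */
#if 0
static void example_zero_head_and_tail(struct page *page)
{
	/* Zero bytes [0, 64) and [page_size - 64, page_size) */
	zero_user_segments(page, 0, 64,
			   page_size(page) - 64, page_size(page));
}
#endif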
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

/*
 * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
 * slot is unused which acts as a guard page
 */
#ifdef CONFIG_DEBUG_KMAP_LOCAL
# define KM_INCR	2
#else
# define KM_INCR	1
#endif

static inline int kmap_local_idx_push(void)
{
	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	current->kmap_ctrl.idx += KM_INCR;
	BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
	return current->kmap_ctrl.idx - 1;
}

static inline int kmap_local_idx(void)
{
	return current->kmap_ctrl.idx - 1;
}

static inline void kmap_local_idx_pop(void)
{
	current->kmap_ctrl.idx -= KM_INCR;
	BUG_ON(current->kmap_ctrl.idx < 0);
}
#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif

#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
static inline void *arch_kmap_local_high_get(struct page *page)
{
	return NULL;
}
#endif
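/*
 * Illustrative sketch of the arch hooks, in the spirit of what a VIVT
 * cache architecture (e.g. arm) provides from asm/highmem.h: flush the
 * cache alias before tearing a mapping down and shoot down the local
 * TLB entry around map/unmap. Shown for illustration only; kept out of
 * the build.
 */
#if 0
#define arch_kmap_local_post_map(vaddr, pteval)	\
	local_flush_tlb_kernel_page(vaddr)
#define arch_kmap_local_pre_unmap(vaddr)	\
	flush_cache_kmaps()
#define arch_kmap_local_post_unmap(vaddr)	\
	local_flush_tlb_kernel_page(vaddr)
#endif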
/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
		return true;
	}
#endif
	return false;
}
static inline int kmap_local_calc_idx(int idx)
{
	return idx + KM_MAX_IDX * smp_processor_id();
}
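/*
 * Worked example, assuming KM_MAX_IDX == 16: the third local slot
 * (idx == 2) on CPU 1 uses fixmap index 2 + 16 * 1 == 18, so every
 * CPU owns a disjoint window of KM_MAX_IDX fixmap slots.
 */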
static pte_t *__kmap_pte;

static pte_t *kmap_get_pte(void)
{
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return __kmap_pte;
}
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte = kmap_get_pte();
	unsigned long vaddr;
	int idx;

	/*
	 * Disable migration so resulting virtual address is stable
	 * across preemption.
	 */
	migrate_disable();
	preempt_disable();
	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	pteval = pfn_pte(pfn, prot);
	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
	arch_kmap_local_post_map(vaddr, pteval);
	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
	preempt_enable();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	void *kmap;

	/*
	 * To broaden the usage of the actual kmap_local() machinery always map
	 * pages when debugging is enabled and the architecture has no problems
	 * with alias mappings.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
		return page_address(page);

	/* Try kmap_high_get() if architecture has it enabled */
	kmap = arch_kmap_local_high_get(page);
	if (kmap)
		return kmap;

	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
}
EXPORT_SYMBOL(__kmap_local_page_prot);
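/*
 * Illustrative sketch of the kmap_local_page() pattern that lands here
 * for highmem pages. The mapping is CPU/thread local, does not sleep,
 * and must be released in LIFO order. Hypothetical helper, kept out of
 * the build.
 */
#if 0
static u32 example_sum_page(struct page *page)
{
	u32 *p = kmap_local_page(page);
	u32 sum = 0;
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		sum += p[i];
	kunmap_local(p);	/* most recent mapping first */
	return sum;
}
#endif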
void kunmap_local_indexed(void *vaddr)
{
	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
	pte_t *kmap_pte = kmap_get_pte();
	int idx;

	if (addr < __fix_to_virt(FIX_KMAP_END) ||
	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) {
			/* This _should_ never happen! See above. */
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Handle mappings which were obtained by kmap_high_get()
		 * first as the virtual address of such mappings is below
		 * PAGE_OFFSET. Warn for all other addresses which are in
		 * the user space part of the virtual address space.
		 */
		if (!kmap_high_unmap_local(addr))
			WARN_ON_ONCE(addr < PAGE_OFFSET);
		return;
	}

	preempt_disable();
	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	arch_kmap_local_pre_unmap(addr);
	pte_clear(&init_mm, addr, kmap_pte - idx);
	arch_kmap_local_post_unmap(addr);
	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
	kmap_local_idx_pop();
	preempt_enable();
	migrate_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);
/*
 * Invoked before switch_to(). This is safe even when during or after
 * clearing the maps an interrupt which needs a kmap_local happens because
 * the task::kmap_ctrl.idx is not modified by the unmapping code, so a
 * nested kmap_local will use the next unused index and restore the index
 * on unmap. The already cleared kmaps of the outgoing task are irrelevant
 * because the interrupt context does not know about them. The same applies
 * when scheduling back in for an interrupt which happens before the
 * restore is complete.
 */
void __kmap_local_sched_out(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Clear kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/*
		 * This is a horrible hack for XTENSA to calculate the
		 * coloured PTE index. Uses the PFN encoded into the pteval
		 * and the map index calculation because the actual mapped
		 * virtual address is not stored in task::kmap_ctrl.
		 * For any sane architecture this is optimized out.
		 */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));

		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		arch_kmap_local_pre_unmap(addr);
		pte_clear(&init_mm, addr, kmap_pte - idx);
		arch_kmap_local_post_unmap(addr);
	}
}
void __kmap_local_sched_in(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte = kmap_get_pte();
	int i;

	/* Restore kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
			WARN_ON_ONCE(!pte_none(pteval));
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/* See comment in __kmap_local_sched_out() */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
		arch_kmap_local_post_map(addr, pteval);
	}
}
void kmap_local_fork(struct task_struct *tsk)
{
	if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
		memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
}

#endif /* CONFIG_KMAP_LOCAL */

#if defined(HASHED_PAGE_VIRTUAL)
#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];
static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}
/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}
EXPORT_SYMBOL(page_address);
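/*
 * Illustrative sketch: page_address() is a pure lookup. For a lowmem
 * page it always returns the linear-map address; for a highmem page it
 * returns NULL unless a permanent kmap is currently installed.
 * Hypothetical helper, kept out of the build.
 */
#if 0
static bool example_has_permanent_kmap(const struct page *page)
{
	return PageHighMem(page) && page_address(page) != NULL;
}
#endif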
/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}
void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */