#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);
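
/*
 * Usage sketch (illustrative, assumes process context; data and len are
 * placeholders): kmap() may sleep, and the mapping is released by page
 * rather than by virtual address:
 *
 *	char *vaddr = kmap(page);
 *	memcpy(vaddr, data, len);
 *	kunmap(page);
 */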

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
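
/*
 * Usage sketch (illustrative): a typical atomic kmap is a short
 * map/touch/unmap sequence with no sleeping in between, e.g. zeroing
 * a highmem page:
 *
 *	void *vaddr = kmap_atomic(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 */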

void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);
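
/*
 * Illustrative note on the slot arithmetic above: each CPU owns a private
 * window of KM_TYPE_NR fixmap slots, which is why no lock is needed.
 * Assuming KM_TYPE_NR == 20, the third nested atomic kmap (type == 2) on
 * CPU 1 uses idx = 2 + 20 * 1 = 22, i.e. slot FIX_KMAP_BEGIN + 22. Fixmap
 * virtual addresses grow downwards as the index grows, hence the negative
 * pte indexing (kmap_pte - idx).
 */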

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
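
/*
 * Illustrative note: the pfn variant is for frames with no struct page
 * backing them (assumption: e.g. device or otherwise unmanaged memory);
 * the caller passes a raw page frame number and still unmaps with
 * kunmap_atomic():
 *
 *	void *vaddr = kmap_atomic_pfn(pfn);
 *	...
 *	kunmap_atomic(vaddr);
 */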

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
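
/*
 * Illustrative note (src_page/dst_page are placeholders): the idx push in
 * kmap_atomic_prot() and the pop above give atomic kmaps stack discipline,
 * so nested mappings must be released in reverse order:
 *
 *	void *src = kmap_atomic(src_page);
 *	void *dst = kmap_atomic(dst_page);
 *	memcpy(dst, src, PAGE_SIZE);
 *	kunmap_atomic(dst);
 *	kunmap_atomic(src);
 */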

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);
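
/*
 * Illustrative note: addresses below FIXADDR_START come from the direct
 * mapping, so virt_to_page() is enough; only fixmap addresses need the
 * reverse walk through the kmap pte array above.
 */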

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before free_all_bootmem()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
}