Linux 2.6.20.13
arch/i386/mm/highmem.c
#include <linux/highmem.h>
#include <linux/module.h>
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
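
/*
 * Usage sketch (not part of the original file): the hypothetical helper
 * below shows the sleeping kmap()/kunmap() pair reading one byte from a
 * page that may live in highmem.  It must run in process context, since
 * kmap() can sleep waiting for a free slot in the global kmap pool.
 */
static unsigned char example_peek_first_byte(struct page *page)
{
	unsigned char *vaddr = kmap(page);	/* lowmem pages map for free */
	unsigned char c = vaddr[0];

	kunmap(page);				/* release the shared slot */
	return c;
}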
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));

	return (void*) vaddr;
}
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it.  Keeping stale mappings around is
	 * also a bad idea, in case the page changes cacheability attributes
	 * or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	pagefault_enable();
}
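
/*
 * Usage sketch (not part of the original file): a hypothetical page copy
 * using per-CPU atomic slots.  KM_USER0/KM_USER1 are conventional choices
 * for this kind of pairing; nothing between kmap_atomic() and
 * kunmap_atomic() may sleep, since pagefaults (and with them preemption)
 * stay disabled for the duration.
 */
#include <linux/string.h>	/* for memcpy; harmless if already pulled in */

static void example_copy_page_atomic(struct page *dst, struct page *src)
{
	char *vdst = kmap_atomic(dst, KM_USER0);
	char *vsrc = kmap_atomic(src, KM_USER1);

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_atomic(vsrc, KM_USER1);	/* unmap in reverse order */
	kunmap_atomic(vdst, KM_USER0);
}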
/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));

	return (void*) vaddr;
}
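
/*
 * Usage sketch (not part of the original file): kmap_atomic_pfn() for a
 * frame with no struct page, e.g. a reserved or device frame.  The pfn
 * and helper name are hypothetical, and KM_USER0 is just an arbitrary
 * slot choice; the matching teardown is plain kunmap_atomic() on the
 * returned address.
 */
static unsigned long example_read_raw_pfn(unsigned long pfn)
{
	unsigned long *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	unsigned long first_word = vaddr[0];

	kunmap_atomic(vaddr, KM_USER0);
	return first_word;
}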
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
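
/*
 * Usage sketch (not part of the original file): recovering the struct page
 * behind a kernel virtual address that may be either a plain lowmem address
 * or a live atomic-kmap slot, e.g. for debugging.  The helper name is
 * hypothetical.
 */
static unsigned long example_flags_of_mapping(void *kvaddr)
{
	struct page *page = kmap_atomic_to_page(kvaddr);

	return page->flags;
}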
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);