/* arch/mips/mm/highmem.c */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

/* PTE of the first kmap fixmap slot, cached by kmap_init() */
static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;
void *kmap(struct page *page)
{
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        addr = kmap_high(page);
        flush_tlb_one((unsigned long)addr);

        return addr;
}
EXPORT_SYMBOL(kmap);
void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
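
/*
 * Usage sketch (illustrative, not part of the original file): sleeping
 * context code pairs kmap() with kunmap(); note that kunmap() takes the
 * page, not the kernel virtual address. The helper name below is
 * hypothetical.
 *
 *      static void example_zero_page(struct page *page)
 *      {
 *              void *vaddr = kmap(page);
 *
 *              memset(vaddr, 0, PAGE_SIZE);
 *              kunmap(page);
 *      }
 */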
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page)
{
        unsigned long vaddr;
        int idx, type;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
        set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
        local_flush_tlb_one((unsigned long)vaddr);

        return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        int type __maybe_unused;

        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
                return;
        }

        type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
        {
                int idx = type + KM_TYPE_NR * smp_processor_id();

                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

                /*
                 * force other mappings to Oops if they'll try to access
                 * this pte without first remapping it
                 */
                pte_clear(&init_mm, vaddr, kmap_pte-idx);
                local_flush_tlb_one(vaddr);
        }
#endif
        kmap_atomic_idx_pop();
        pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
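
/*
 * Usage sketch (illustrative, not part of the original file): callers use
 * the generic kunmap_atomic() wrapper, which ends up in __kunmap_atomic()
 * above. Atomic kmaps nest in LIFO order and the mapped section must not
 * sleep. The helper name below is hypothetical.
 *
 *      static void example_copy_highpage(struct page *dst, struct page *src)
 *      {
 *              void *vdst = kmap_atomic(dst);
 *              void *vsrc = kmap_atomic(src);
 *
 *              copy_page(vdst, vsrc);
 *              kunmap_atomic(vsrc);
 *              kunmap_atomic(vdst);
 *      }
 */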
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
        unsigned long vaddr;
        int idx, type;

        pagefault_disable();

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
        flush_tlb_one(vaddr);

        return (void*) vaddr;
}
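
/*
 * Usage sketch (illustrative, not part of the original file): mapping a
 * frame by page frame number, e.g. one taken from a firmware-reserved
 * region that has no struct page. The pfn source is hypothetical; the
 * mapping is torn down with the generic kunmap_atomic() on the returned
 * address.
 *
 *      void *vaddr = kmap_atomic_pfn(pfn);
 *
 *      memset(vaddr, 0, PAGE_SIZE);
 *      kunmap_atomic(vaddr);
 */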
struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        /* fixmap addresses descend as the index grows, so index off kmap_pte */
        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}
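
/*
 * Usage sketch (illustrative, not part of the original file): recovering
 * the struct page behind an atomic kmap address.
 *
 *      void *vaddr = kmap_atomic(page);
 *
 *      BUG_ON(kmap_atomic_to_page(vaddr) != page);
 *      kunmap_atomic(vaddr);
 */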
void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}