Merge tag 'locks-v3.16-2' of git://git.samba.org/jlayton/linux
[linux/fpc-iii.git] / arch / arm / mm / highmem.c
blob 45aeaaca9052f237322cf91a247a69fdb2d1571e
/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	september 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
13 #include <linux/module.h>
14 #include <linux/highmem.h>
15 #include <linux/interrupt.h>
16 #include <asm/fixmap.h>
17 #include <asm/cacheflush.h>
18 #include <asm/tlbflush.h>
19 #include "mm.h"
21 pte_t *fixmap_page_table;
23 static inline void set_fixmap_pte(int idx, pte_t pte)
25 unsigned long vaddr = __fix_to_virt(idx);
26 set_pte_ext(fixmap_page_table + idx, pte, 0);
27 local_flush_tlb_kernel_page(vaddr);
30 static inline pte_t get_fixmap_pte(unsigned long vaddr)
32 unsigned long idx = __virt_to_fix(vaddr);
33 return *(fixmap_page_table + idx);
36 void *kmap(struct page *page)
38 might_sleep();
39 if (!PageHighMem(page))
40 return page_address(page);
41 return kmap_high(page);
43 EXPORT_SYMBOL(kmap);
45 void kunmap(struct page *page)
47 BUG_ON(in_interrupt());
48 if (!PageHighMem(page))
49 return;
50 kunmap_high(page);
52 EXPORT_SYMBOL(kunmap);
54 void *kmap_atomic(struct page *page)
56 unsigned int idx;
57 unsigned long vaddr;
58 void *kmap;
59 int type;
61 pagefault_disable();
62 if (!PageHighMem(page))
63 return page_address(page);
65 #ifdef CONFIG_DEBUG_HIGHMEM
67 * There is no cache coherency issue when non VIVT, so force the
68 * dedicated kmap usage for better debugging purposes in that case.
70 if (!cache_is_vivt())
71 kmap = NULL;
72 else
73 #endif
74 kmap = kmap_high_get(page);
75 if (kmap)
76 return kmap;
78 type = kmap_atomic_idx_push();
80 idx = type + KM_TYPE_NR * smp_processor_id();
81 vaddr = __fix_to_virt(idx);
82 #ifdef CONFIG_DEBUG_HIGHMEM
84 * With debugging enabled, kunmap_atomic forces that entry to 0.
85 * Make sure it was indeed properly unmapped.
87 BUG_ON(!pte_none(*(fixmap_page_table + idx)));
88 #endif
90 * When debugging is off, kunmap_atomic leaves the previous mapping
91 * in place, so the contained TLB flush ensures the TLB is updated
92 * with the new mapping.
94 set_fixmap_pte(idx, mk_pte(page, kmap_prot));
96 return (void *)vaddr;
98 EXPORT_SYMBOL(kmap_atomic);
100 void __kunmap_atomic(void *kvaddr)
102 unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
103 int idx, type;
105 if (kvaddr >= (void *)FIXADDR_START) {
106 type = kmap_atomic_idx();
107 idx = type + KM_TYPE_NR * smp_processor_id();
109 if (cache_is_vivt())
110 __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
111 #ifdef CONFIG_DEBUG_HIGHMEM
112 BUG_ON(vaddr != __fix_to_virt(idx));
113 set_fixmap_pte(idx, __pte(0));
114 #else
115 (void) idx; /* to kill a warning */
116 #endif
117 kmap_atomic_idx_pop();
118 } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
119 /* this address was obtained through kmap_high_get() */
120 kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
122 pagefault_enable();
124 EXPORT_SYMBOL(__kunmap_atomic);
126 void *kmap_atomic_pfn(unsigned long pfn)
128 unsigned long vaddr;
129 int idx, type;
131 pagefault_disable();
133 type = kmap_atomic_idx_push();
134 idx = type + KM_TYPE_NR * smp_processor_id();
135 vaddr = __fix_to_virt(idx);
136 #ifdef CONFIG_DEBUG_HIGHMEM
137 BUG_ON(!pte_none(*(fixmap_page_table + idx)));
138 #endif
139 set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
141 return (void *)vaddr;
144 struct page *kmap_atomic_to_page(const void *ptr)
146 unsigned long vaddr = (unsigned long)ptr;
148 if (vaddr < FIXADDR_START)
149 return virt_to_page(ptr);
151 return pte_page(get_fixmap_pte(vaddr));