#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

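/*
 * Usage sketch (illustrative only, not part of the original file): kmap()
 * may sleep, so it is only safe in process context, and the mapping stays
 * valid until the matching kunmap(). The helper name below is hypothetical.
 */
#if 0	/* example only, not compiled */
static unsigned char example_read_first_byte(struct page *page)
{
	unsigned char *vaddr = kmap(page);	/* may sleep */
	unsigned char val = vaddr[0];

	kunmap(page);				/* release the mapping */
	return val;
}
#endif
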
/*
 * Sanity-check that the km_type a caller passed matches the context
 * (hardirq, softirq, process) it is running in; warn at most ten times.
 */
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they'll try to access this pte
	 * without first remapping it. Keeping stale mappings around is a
	 * bad idea also, in case the page changes cacheability attributes
	 * or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}

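/*
 * Usage sketch (illustrative only): an atomic kmap must be released on the
 * same CPU without sleeping in between, and the km_type (KM_USER0 here) must
 * match the context checked by debug_kmap_atomic_prot(). The helper name is
 * hypothetical.
 */
#if 0	/* example only, not compiled */
static void example_clear_highpage(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);	/* no sleeping between map and unmap */
	kunmap_atomic(vaddr, KM_USER0);
}
#endif
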
/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

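/*
 * Usage sketch (illustrative only): kmap_atomic_pfn() suits frames with no
 * struct page, e.g. device memory addressed by raw pfn. The helper name and
 * pfn source are hypothetical.
 */
#if 0	/* example only, not compiled */
static unsigned int example_peek_pfn(unsigned long pfn)
{
	unsigned int *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
	unsigned int val = vaddr[0];

	kunmap_atomic(vaddr, KM_USER0);
	return val;
}
#endif
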
/*
 * Convert a pointer returned by kmap_atomic() (or a lowmem address)
 * back to the struct page it maps.
 */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);