#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

/*
 * Map a highmem page into the kernel's permanent kmap area. May sleep,
 * so this must not be called from atomic context.
 */
void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
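
/*
 * Illustrative usage sketch, not part of the original file: a sleepable
 * caller maps a highmem page, touches it, then unmaps it. The page is
 * assumed to come from a prior alloc_page(GFP_HIGHUSER) call.
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);
 *
 *	if (page) {
 *		void *addr = kmap(page);
 *
 *		memset(addr, 0, PAGE_SIZE);
 *		kunmap(page);
 *		__free_page(page);
 *	}
 */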

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
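
/*
 * Illustrative usage sketch, not part of the original file, assuming a
 * valid struct page *page and a kernel buffer buf supplied by the
 * caller. Nothing between the two calls may sleep:
 *
 *	void *vaddr = kmap_atomic(page);
 *
 *	memcpy(buf, vaddr, PAGE_SIZE);
 *	kunmap_atomic(vaddr);
 */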

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
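
/*
 * Illustrative usage sketch, not part of the original file: mapping a raw
 * page frame that has no struct page. The pfn here is a hypothetical
 * value, e.g. one handed over by firmware:
 *
 *	void *vaddr = kmap_atomic_pfn(firmware_pfn);
 *
 *	... read or write the page through vaddr, without sleeping ...
 *	kunmap_atomic(vaddr);
 */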

/* Translate an address returned by kmap_atomic() back to its struct page. */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
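
/*
 * Illustrative usage sketch, not part of the original file: recovering
 * the struct page behind an atomic mapping made earlier on this CPU:
 *
 *	void *vaddr = kmap_atomic(page);
 *	struct page *same_page = kmap_atomic_to_page(vaddr);
 */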

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}