#include <linux/module.h>
#include <linux/highmem.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(__kmap);

void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(__kunmap);

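/*
 * Illustrative sketch, not part of the original file: a typical sleeping
 * user of this pair, assuming the usual kmap()/kunmap() wrappers around
 * __kmap()/__kunmap(). The helper name and its parameters are
 * hypothetical; the "#if 0" guard keeps it out of any real build.
 */
#if 0
static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap(page);	/* may sleep if the pkmap pool is full */

	memcpy(dst, src, len);
	kunmap(page);		/* note: unmap by page, not by address */
}
#endif
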
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * force other mappings to Oops if they'll try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

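/*
 * Illustrative sketch, not part of the original file: the short, non-sleeping
 * critical section the comment above has in mind, assuming the usual
 * kmap_atomic()/kunmap_atomic() wrappers around the functions defined here.
 * The same km_type slot must be passed to both calls, and nothing in between
 * may sleep, since pagefaults are disabled. Guarded by "#if 0".
 */
#if 0
static void example_zero_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);	/* per-CPU fixmap slot */

	memset(kaddr, 0, PAGE_SIZE);
	kunmap_atomic(kaddr, KM_USER0);	/* unmap by address, same slot */
}
#endif
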
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void*) vaddr;
}

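/*
 * Illustrative sketch, not part of the original file: kmap_atomic_pfn()
 * suits frames with no struct page behind them (device or firmware memory,
 * for instance). The helper, its pfn source, and the offset are
 * hypothetical; offset must stay below PAGE_SIZE. Guarded by "#if 0".
 */
#if 0
static u32 example_peek_frame(unsigned long pfn, unsigned long offset)
{
	void *kaddr = kmap_atomic_pfn(pfn, KM_PTE0);
	u32 val = *(u32 *)(kaddr + offset);

	/*
	 * There is no kunmap_atomic_pfn(); the slot is released like any
	 * other atomic kmap.
	 */
	kunmap_atomic(kaddr, KM_PTE0);
	return val;
}
#endif
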
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

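/*
 * Illustrative sketch, not part of the original file: a round-trip check
 * of the reverse lookup, assuming the usual kmap_atomic_to_page() wrapper
 * around __kmap_atomic_to_page(). Hypothetical and guarded by "#if 0".
 */
#if 0
static void example_check_reverse_lookup(struct page *page)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	/* The reverse lookup must return the page we just mapped. */
	BUG_ON(kmap_atomic_to_page(kaddr) != page);
	kunmap_atomic(kaddr, KM_USER0);
}
#endif
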
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}